Methods Summary
protected synchronized void | doClose()
for (int i = 0; i < subReaders.length; i++)
subReaders[i].close();
|
protected void | doCommit()
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit();
|
protected void | doDelete(int n)
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
|
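doDelete is not called directly; it is reached through the public deleteDocument(int) inherited from IndexReader, which takes the composite (global) document number. A minimal usage sketch, with a made-up index path:
IndexReader reader = IndexReader.open("/path/to/index");  // hypothetical path
reader.deleteDocument(42);   // global doc id; doDelete routes it to the owning sub-reader
reader.close();              // flushes the deletion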
protected void | doSetNorm(int n, java.lang.String field, byte value)
normsCache.remove(field); // clear cache
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n - starts[i], field, value); // dispatch
|
protected void | doUndeleteAll()
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
|
public int | docFreq(org.apache.lucene.index.Term t)
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
|
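Since docFreq just sums the per-segment counts, the value reported by the composite reader equals what a fully merged index would report. A usage sketch with invented field and term text:
int df = reader.docFreq(new Term("contents", "lucene"));  // total document frequency across all sub-readers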
public org.apache.lucene.document.Document | document(int n, org.apache.lucene.document.FieldSelector fieldSelector)
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
|
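The FieldSelector is passed straight through to the owning sub-reader, so lazy or partial field loading works exactly as on a single segment. A sketch, assuming MapFieldSelector is available and using made-up field names:
FieldSelector sel = new MapFieldSelector(new String[] { "title" });  // load only the "title" field
Document doc = reader.document(123, sel);                            // 123 is a global doc id
String title = doc.get("title");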
private byte[] | fakeNorms()
// lazily create a shared array of default norms for fields that have none stored
if (ones == null) ones = SegmentReader.createFakeNorms(maxDoc());
return ones;
|
public java.util.Collection | getFieldNames(org.apache.lucene.index.IndexReader.FieldOption fieldNames)
// maintain a unique set of field names
ensureOpen();
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];
Collection names = reader.getFieldNames(fieldNames);
fieldSet.addAll(names);
}
return fieldSet;
|
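Because the names are collected into a HashSet, a field present in several segments is reported once. For example:
Collection fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
// fields now contains every field name occurring in any sub-reader, without duplicates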
public org.apache.lucene.index.TermFreqVector | getTermFreqVector(int n, java.lang.String field)
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
|
public org.apache.lucene.index.TermFreqVector[] | getTermFreqVectors(int n)
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
|
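Both term-vector methods translate the global document number and delegate, so the vectors returned are whatever the owning segment stored (or null if none was stored). A small sketch with an invented field name:
TermFreqVector tfv = reader.getTermFreqVector(5, "contents");  // null if doc 5 has no vector for "contents"
if (tfv != null) {
  String[] terms = tfv.getTerms();
  int[] freqs = tfv.getTermFrequencies();
}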
public boolean | hasDeletions()
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
|
public boolean | hasNorms(java.lang.String field)
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
|
private void | initialize(org.apache.lucene.index.IndexReader[] subReaders)
this.subReaders = subReaders;
starts = new int[subReaders.length + 1]; // build starts array
for (int i = 0; i < subReaders.length; i++) {
starts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc(); // compute maxDocs
if (subReaders[i].hasDeletions())
hasDeletions = true;
}
starts[subReaders.length] = maxDoc;
|
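starts[i] is the global document-number offset of sub-reader i, and the extra final slot holds the combined maxDoc. A worked illustration with invented maxDoc values:
// subReaders[0].maxDoc() = 5, subReaders[1].maxDoc() = 3, subReaders[2].maxDoc() = 7
// starts = { 0, 5, 8, 15 }, maxDoc = 15
// global doc 6 therefore lives in sub-reader 1 as local doc 6 - starts[1] = 1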
public boolean | isDeleted(int n)
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
|
public int | maxDoc()
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
|
public synchronized byte[] | norms(java.lang.String field)
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return fakeNorms();
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
subReaders[i].norms(field, bytes, starts[i]);
normsCache.put(field, bytes); // update cache
return bytes;
|
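The merged array is laid out in global document order: each sub-reader writes its norms starting at starts[i], so bytes[n] is the norm of global document n. A usage sketch with a made-up field name:
byte[] norms = reader.norms("contents");  // one byte per document, indexed by global doc id
byte norm6 = norms[6];                    // norm of global document 6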
public synchronized void | norms(java.lang.String field, byte[] result, int offset)
ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes == null && !hasNorms(field)) bytes = fakeNorms();
if (bytes != null) // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
for (int i = 0; i < subReaders.length; i++) // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
|
public synchronized int | numDocs()
// Don't call ensureOpen() here (it could affect performance)
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
|
private int | readerIndex(int n) // find reader for doc n:
int lo = 0; // search starts array
int hi = subReaders.length - 1; // for first element less
while (hi >= lo) {
int mid = (lo + hi) >> 1;
int midValue = starts[mid];
if (n < midValue)
hi = mid - 1;
else if (n > midValue)
lo = mid + 1;
else { // found a match
while (mid+1 < subReaders.length && starts[mid+1] == midValue) {
mid++; // scan to last match
}
return mid;
}
}
return hi;
|
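The search returns the index of the last sub-reader whose start offset is less than or equal to n; when the loop falls through without an exact match, hi already points at that reader, and the scan-to-last-match step skips over empty sub-readers whose start offsets coincide with the next one's. Continuing the illustration from initialize() (invented values):
// starts = { 0, 5, 8, 15 }, subReaders.length = 3
// readerIndex(6): probes starts[1] = 5, then starts[2] = 8, exits with hi = 1  -> sub-reader 1
// readerIndex(5): hits starts[1] exactly                                       -> sub-reader 1
// readerIndex(8): hits starts[2] exactly                                       -> sub-reader 2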
void | rollbackCommit()
super.rollbackCommit();
for (int i = 0; i < subReaders.length; i++) {
subReaders[i].rollbackCommit();
}
|
void | startCommit()
super.startCommit();
for (int i = 0; i < subReaders.length; i++) {
subReaders[i].startCommit();
}
|
public org.apache.lucene.index.TermDocs | termDocs()
ensureOpen();
return new MultiTermDocs(subReaders, starts);
|
public org.apache.lucene.index.TermPositions | termPositions()
ensureOpen();
return new MultiTermPositions(subReaders, starts);
|
public org.apache.lucene.index.TermEnum | terms()
ensureOpen();
return new MultiTermEnum(subReaders, starts, null);
|
public org.apache.lucene.index.TermEnum | terms(org.apache.lucene.index.Term term)
ensureOpen();
return new MultiTermEnum(subReaders, starts, term);
|
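The enumerator factories hide the segment boundaries: MultiTermEnum merges the sub-readers' term streams, and MultiTermDocs/MultiTermPositions report already-offset global document numbers. A minimal usage sketch with invented field and term text:
TermDocs td = reader.termDocs();
td.seek(new Term("contents", "lucene"));
while (td.next()) {
  int globalDoc = td.doc();   // global doc id (sub-reader offset already applied)
  int freq = td.freq();
}
td.close();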