Methods Summary
---
protected synchronized void | doClose()
  for (int i = 0; i < subReaders.length; i++)
    subReaders[i].close();
|
protected void | doCommit()
  for (int i = 0; i < subReaders.length; i++)
    subReaders[i].commit();
|
protected void | doDelete(int n)
  numDocs = -1;                          // invalidate cache
  int i = readerIndex(n);                // find segment num
  subReaders[i].delete(n - starts[i]);   // dispatch to segment reader
  hasDeletions = true;
|
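doDelete shows the dispatch idiom used throughout this class: readerIndex(n) finds the sub-reader that owns global document n, and n - starts[i] converts the global number into that reader's local document number. A minimal sketch of the effect, assuming two hypothetical existing indexes at "index-a" and "index-b" (the public delete(int) on IndexReader is what routes through doDelete):

    IndexReader a = IndexReader.open("index-a");   // assume maxDoc() == 100
    IndexReader b = IndexReader.open("index-b");   // assume maxDoc() == 50
    IndexReader multi = new MultiReader(new IndexReader[] { a, b });

    // starts == {0, 100, 150}; global doc 120 lives in reader 1
    multi.delete(120);  // dispatches as b.delete(120 - starts[1]) == b.delete(20)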
protected void | doSetNorm(int n, java.lang.String field, byte value)
  normsCache.remove(field);                            // clear cache
  int i = readerIndex(n);                              // find segment num
  subReaders[i].setNorm(n - starts[i], field, value);  // dispatch
|
protected void | doUndeleteAll()
  for (int i = 0; i < subReaders.length; i++)
    subReaders[i].undeleteAll();
  hasDeletions = false;
|
public int | docFreq(org.apache.lucene.index.Term t)
  int total = 0;                         // sum freqs in segments
  for (int i = 0; i < subReaders.length; i++)
    total += subReaders[i].docFreq(t);
  return total;
|
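Unlike the document-numbered methods, docFreq needs no readerIndex dispatch: a term's document frequency in the merged view is simply the sum over sub-readers. A worked example with made-up numbers, continuing the sketch above:

    Term t = new Term("contents", "lucene");   // hypothetical field and text
    // if a.docFreq(t) == 3 and b.docFreq(t) == 5,
    // then multi.docFreq(t) == 8
    int total = multi.docFreq(t);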
public org.apache.lucene.document.Document | document(int n)
  int i = readerIndex(n);                        // find segment num
  return subReaders[i].document(n - starts[i]);  // dispatch to segment reader
|
public java.util.Collection | getFieldNames()
  // maintain a unique set of field names
  Set fieldSet = new HashSet();
  for (int i = 0; i < subReaders.length; i++) {
    IndexReader reader = subReaders[i];
    Collection names = reader.getFieldNames();
    // iterate through the field names and add them to the set
    for (Iterator iterator = names.iterator(); iterator.hasNext();) {
      String s = (String) iterator.next();
      fieldSet.add(s);
    }
  }
  return fieldSet;
|
public java.util.Collection | getFieldNames(boolean indexed)
  // maintain a unique set of field names
  Set fieldSet = new HashSet();
  for (int i = 0; i < subReaders.length; i++) {
    IndexReader reader = subReaders[i];
    Collection names = reader.getFieldNames(indexed);
    fieldSet.addAll(names);
  }
  return fieldSet;
|
public java.util.Collection | getIndexedFieldNames(boolean storedTermVector)
  // maintain a unique set of field names
  Set fieldSet = new HashSet();
  for (int i = 0; i < subReaders.length; i++) {
    IndexReader reader = subReaders[i];
    Collection names = reader.getIndexedFieldNames(storedTermVector);
    fieldSet.addAll(names);
  }
  return fieldSet;
|
public org.apache.lucene.index.TermFreqVector | getTermFreqVector(int n, java.lang.String field)
  int i = readerIndex(n);  // find segment num
  return subReaders[i].getTermFreqVector(n - starts[i], field);
|
public org.apache.lucene.index.TermFreqVector[] | getTermFreqVectors(int n)
Return an array of term frequency vectors for the specified document.
The array contains a vector for each vectorized field in the document.
Each vector contains the terms and frequencies for all terms
in a given vectorized field.
If no such fields exist, the method returns null.
  int i = readerIndex(n);                                  // find segment num
  return subReaders[i].getTermFreqVectors(n - starts[i]);  // dispatch to segment
|
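A short usage sketch for the term-vector accessors, continuing the earlier example; document number 42 is illustrative, and the accessors shown (getField, getTerms, getTermFrequencies) are the standard ones on the TermFreqVector interface:

    TermFreqVector[] vectors = multi.getTermFreqVectors(42);
    if (vectors != null) {                        // null when nothing was vectorized
      for (int i = 0; i < vectors.length; i++) {
        String field = vectors[i].getField();     // the vectorized field's name
        String[] terms = vectors[i].getTerms();   // parallel to the frequencies
        int[] freqs = vectors[i].getTermFrequencies();
      }
    }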
public boolean | hasDeletions()
  return hasDeletions;
|
private void | initialize(org.apache.lucene.index.IndexReader[] subReaders)
  this.subReaders = subReaders;
  starts = new int[subReaders.length + 1];  // build starts array
  for (int i = 0; i < subReaders.length; i++) {
    starts[i] = maxDoc;
    maxDoc += subReaders[i].maxDoc();       // compute maxDocs
    if (subReaders[i].hasDeletions())
      hasDeletions = true;
  }
  starts[subReaders.length] = maxDoc;
|
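initialize is where the starts array gets its meaning: starts[i] is the count of documents in all sub-readers before reader i, and the extra final slot holds the combined maxDoc. A hypothetical trace:

    // sub-readers with maxDoc() of 100, 50, and 25 yield
    //   starts == {0, 100, 150, 175}   and   maxDoc == 175
    // so global doc numbers 0..99 belong to reader 0,
    // 100..149 to reader 1, and 150..174 to reader 2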
public boolean | isDeleted(int n)
  int i = readerIndex(n);                         // find segment num
  return subReaders[i].isDeleted(n - starts[i]);  // dispatch to segment reader
|
public int | maxDoc()
  return maxDoc;
|
public synchronized byte[] | norms(java.lang.String field)
  byte[] bytes = (byte[])normsCache.get(field);
  if (bytes != null)
    return bytes;                  // cache hit
  bytes = new byte[maxDoc()];
  for (int i = 0; i < subReaders.length; i++)
    subReaders[i].norms(field, bytes, starts[i]);
  normsCache.put(field, bytes);    // update cache
  return bytes;
|
public synchronized void | norms(java.lang.String field, byte[] result, int offset)
  byte[] bytes = (byte[])normsCache.get(field);
  if (bytes != null) {             // cache hit: cached array already covers all sub-readers
    System.arraycopy(bytes, 0, result, offset, maxDoc());
    return;
  }
  for (int i = 0; i < subReaders.length; i++)  // cache miss: read from segments
    subReaders[i].norms(field, result, offset + starts[i]);
|
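The two norms overloads share the per-field cache: the array-returning form fills and caches a maxDoc()-sized array, while the buffer-filling form copies from that cache when it can. A brief sketch, continuing the earlier example with "contents" as a made-up field name:

    byte[] cached = multi.norms("contents");  // length == multi.maxDoc(), cached
    byte[] buf = new byte[multi.maxDoc()];
    multi.norms("contents", buf, 0);          // fills buf, from cache if present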
public synchronized int | numDocs()
  if (numDocs == -1) {               // check cache
    int n = 0;                       // cache miss--recompute
    for (int i = 0; i < subReaders.length; i++)
      n += subReaders[i].numDocs();  // sum from readers
    numDocs = n;
  }
  return numDocs;
|
private int | readerIndex(int n)  // find reader for doc n:
  int lo = 0;                      // search starts array
  int hi = subReaders.length - 1;  // for first element less than n
  while (hi >= lo) {
    int mid = (lo + hi) >> 1;
    int midValue = starts[mid];
    if (n < midValue)
      hi = mid - 1;
    else if (n > midValue)
      lo = mid + 1;
    else {                         // found a match
      while (mid + 1 < subReaders.length && starts[mid + 1] == midValue) {
        mid++;                     // scan to last match
      }
      return mid;
    }
  }
  return hi;
|
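The binary search returns the index of the last starts entry that is <= n (falling out of the loop with hi when there is no exact hit), and the inner scan handles empty sub-readers, whose starts entries collide with their successor's. A hypothetical trace with three sub-readers and starts == {0, 100, 150, 175}:

    // readerIndex(99)  == 0   (99 < starts[1], search falls through to hi == 0)
    // readerIndex(100) == 1   (exact match on starts[1])
    // readerIndex(160) == 2   (search ends with hi == 2)
    // if reader 1 were empty, starts == {0, 100, 100, 175} and the
    // "scan to last match" loop would send doc 100 to reader 2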
public org.apache.lucene.index.TermDocs | termDocs()
  return new MultiTermDocs(subReaders, starts);
|
public org.apache.lucene.index.TermPositions | termPositions()
  return new MultiTermPositions(subReaders, starts);
|
public org.apache.lucene.index.TermEnum | terms()
  return new MultiTermEnum(subReaders, starts, null);
|
public org.apache.lucene.index.TermEnum | terms(org.apache.lucene.index.Term term)
  return new MultiTermEnum(subReaders, starts, term);
|
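The enumeration methods return merging wrappers (MultiTermEnum, MultiTermDocs, MultiTermPositions) rather than dispatching on a document number; the doc numbers they report are already adjusted by starts[i]. A closing usage sketch, continuing the earlier example; the field "contents" and term text "lucene" are placeholders:

    TermDocs docs = multi.termDocs();
    docs.seek(new Term("contents", "lucene"));
    while (docs.next()) {
      int doc = docs.doc();    // global number: segment-local id + starts[i]
      int freq = docs.freq();  // within-document frequency
    }
    docs.close();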