File: MultiReader.java
Doc: API Doc
Category: Apache Lucene 2.2.0
Size: 13970 bytes
Date: Sat Jun 16 22:20:36 BST 2007
Package: org.apache.lucene.index

MultiReader

public class MultiReader extends IndexReader
An IndexReader which reads multiple indexes, appending their content.
version
$Id: MultiReader.java 523302 2007-03-28 12:58:15Z gsingers $
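
For orientation, here is a minimal usage sketch (the index paths are hypothetical; everything else is the public API shown below): two existing indexes are opened, wrapped in a MultiReader, and treated as a single IndexReader whose documents are numbered consecutively, subreader by subreader.

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiReader;

    public class MultiReaderExample {
      public static void main(String[] args) throws Exception {
        IndexReader r1 = IndexReader.open("/data/index-part1");  // hypothetical paths
        IndexReader r2 = IndexReader.open("/data/index-part2");

        // Documents of r2 are appended after those of r1: global doc n maps to
        // r1 for n < r1.maxDoc(), and to r2 at local doc n - r1.maxDoc() otherwise.
        IndexReader reader = new MultiReader(new IndexReader[] { r1, r2 });
        System.out.println("combined maxDoc: " + reader.maxDoc());

        reader.close();  // also closes r1 and r2 (see the constructor note below)
      }
    }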

Fields Summary
private IndexReader[]
subReaders
private int[]
starts
private Hashtable
normsCache
private int
maxDoc
private int
numDocs
private boolean
hasDeletions
private byte[]
ones
Constructors Summary
public MultiReader(IndexReader[] subReaders)

Construct a MultiReader aggregating the named set of (sub)readers. Directory locking for delete, undeleteAll, and setNorm operations is left to the subreaders.

Note that all subreaders are closed if this MultiReader is closed.

param
subReaders set of (sub)readers
throws
IOException


    super(subReaders.length == 0 ? null : subReaders[0].directory());
    initialize(subReaders);
  
MultiReader(Directory directory, SegmentInfos sis, boolean closeDirectory, IndexReader[] subReaders)
Construct reading the named set of readers.

    super(directory, sis, closeDirectory);
    initialize(subReaders);
  
Methods Summary
protected synchronized void doClose()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].close();
  
protected void doCommit()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].commit();
  
protected void doDelete(int n)

    numDocs = -1;                             // invalidate cache
    int i = readerIndex(n);                   // find segment num
    subReaders[i].deleteDocument(n - starts[i]);      // dispatch to segment reader
    hasDeletions = true;
  
protected void doSetNorm(int n, java.lang.String field, byte value)

    normsCache.remove(field);                         // clear cache
    int i = readerIndex(n);                           // find segment num
    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
  
protected void doUndeleteAll()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].undeleteAll();

    hasDeletions = false;
    numDocs = -1;                                 // invalidate cache
  
public int docFreq(org.apache.lucene.index.Term t)

    ensureOpen();
    int total = 0;          // sum freqs in segments
    for (int i = 0; i < subReaders.length; i++)
      total += subReaders[i].docFreq(t);
    return total;
  
public org.apache.lucene.document.Document document(int n, org.apache.lucene.document.FieldSelector fieldSelector)

    ensureOpen();
    int i = readerIndex(n);                          // find segment num
    return subReaders[i].document(n - starts[i], fieldSelector);    // dispatch to segment reader
  
private byte[] fakeNorms()

    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
    return ones;
  
public java.util.Collection getFieldNames(org.apache.lucene.index.IndexReader.FieldOption fieldNames)

    // maintain a unique set of field names
    ensureOpen();
    Set fieldSet = new HashSet();
    for (int i = 0; i < subReaders.length; i++) {
      IndexReader reader = subReaders[i];
      Collection names = reader.getFieldNames(fieldNames);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public org.apache.lucene.index.TermFreqVector getTermFreqVector(int n, java.lang.String field)

    ensureOpen();
    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVector(n - starts[i], field);
  
public org.apache.lucene.index.TermFreqVector[] getTermFreqVectors(int n)

    ensureOpen();
    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
  
public boolean hasDeletions()

    // Don't call ensureOpen() here (it could affect performance)
    return hasDeletions;
  
public boolean hasNorms(java.lang.String field)

    ensureOpen();
    for (int i = 0; i < subReaders.length; i++) {
      if (subReaders[i].hasNorms(field)) return true;
    }
    return false;
  
private void initialize(org.apache.lucene.index.IndexReader[] subReaders)

    this.subReaders = subReaders;
    starts = new int[subReaders.length + 1];    // build starts array
    for (int i = 0; i < subReaders.length; i++) {
      starts[i] = maxDoc;
      maxDoc += subReaders[i].maxDoc();      // compute maxDocs

      if (subReaders[i].hasDeletions())
        hasDeletions = true;
    }
    starts[subReaders.length] = maxDoc;
  
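To make the bookkeeping above concrete, a self-contained sketch with made-up segment sizes: starts[i] holds the global doc number of subreader i's first document, and the extra trailing entry is the combined maxDoc.

    // Illustration only; the sizes stand in for subReaders[i].maxDoc().
    int[] subMaxDocs = { 5, 7 };
    int[] starts = new int[subMaxDocs.length + 1];
    int maxDoc = 0;
    for (int i = 0; i < subMaxDocs.length; i++) {
      starts[i] = maxDoc;                // first global doc id owned by reader i
      maxDoc += subMaxDocs[i];
    }
    starts[subMaxDocs.length] = maxDoc;  // sentinel entry: total doc count
    // Result: starts == {0, 5, 12}, maxDoc == 12; global doc 7 belongs to
    // reader 1 as local doc 7 - starts[1] == 2.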
public boolean isDeleted(int n)

    // Don't call ensureOpen() here (it could affect performance)
    int i = readerIndex(n);                           // find segment num
    return subReaders[i].isDeleted(n - starts[i]);    // dispatch to segment reader
  
public int maxDoc()

    // Don't call ensureOpen() here (it could affect performance)
    return maxDoc;
  
public synchronized byte[] norms(java.lang.String field)

    ensureOpen();
    byte[] bytes = (byte[])normsCache.get(field);
    if (bytes != null)
      return bytes;          // cache hit
    if (!hasNorms(field))
      return fakeNorms();

    bytes = new byte[maxDoc()];
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].norms(field, bytes, starts[i]);
    normsCache.put(field, bytes);      // update cache
    return bytes;
  
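The returned array has one byte per global document number, each subreader filling its slice beginning at starts[i]. A hedged usage sketch, assuming an open MultiReader named reader and an indexed field "body"; the byte encoding can be turned back into a float with Similarity.decodeNorm:

    byte[] norms = reader.norms("body");   // one byte per global doc id
    float norm = org.apache.lucene.search.Similarity.decodeNorm(norms[0]);
    System.out.println("norm for doc 0: " + norm);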
public synchronized void norms(java.lang.String field, byte[] result, int offset)

    ensureOpen();
    byte[] bytes = (byte[])normsCache.get(field);
    if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
    if (bytes != null)                            // cache hit
      System.arraycopy(bytes, 0, result, offset, maxDoc());

    for (int i = 0; i < subReaders.length; i++)      // read from segments
      subReaders[i].norms(field, result, offset + starts[i]);
  
public synchronized int numDocs()

    // Don't call ensureOpen() here (it could affect performance)
    if (numDocs == -1) {        // check cache
      int n = 0;                // cache miss--recompute
      for (int i = 0; i < subReaders.length; i++)
        n += subReaders[i].numDocs();      // sum from readers
      numDocs = n;
    }
    return numDocs;
  
private int readerIndex(int n)

    // find reader for doc n:
    int lo = 0;                                      // search starts array
    int hi = subReaders.length - 1;                  // for first element less

    while (hi >= lo) {
      int mid = (lo + hi) >> 1;
      int midValue = starts[mid];
      if (n < midValue)
        hi = mid - 1;
      else if (n > midValue)
        lo = mid + 1;
      else {                                      // found a match
        while (mid+1 < subReaders.length && starts[mid+1] == midValue) {
          mid++;                                  // scan to last match
        }
        return mid;
      }
    }
    return hi;
  
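In other words, readerIndex() finds the last entry of starts that is less than or equal to n; the scan-to-last-match step matters when empty subreaders produce duplicate start values. Ignoring that duplicate case, the lookup is equivalent to the following sketch built on java.util.Arrays (sizes reused from the earlier illustration; valid doc numbers only):

    int[] starts = { 0, 5, 12 };   // last entry is the sentinel maxDoc
    int n = 7;                     // a global doc number
    int idx = java.util.Arrays.binarySearch(starts, n);
    // On a miss binarySearch returns -(insertionPoint) - 1, so the owning
    // reader is the one whose start precedes the insertion point.
    int reader = idx >= 0 ? idx : -idx - 2;
    int localDoc = n - starts[reader];  // doc 7 -> reader 1, local doc 2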
void rollbackCommit()

    super.rollbackCommit();
    for (int i = 0; i < subReaders.length; i++) {
      subReaders[i].rollbackCommit();
    }
  
void startCommit()

    super.startCommit();
    for (int i = 0; i < subReaders.length; i++) {
      subReaders[i].startCommit();
    }
  
public org.apache.lucene.index.TermDocs termDocs()

    ensureOpen();
    return new MultiTermDocs(subReaders, starts);
  
public org.apache.lucene.index.TermPositions termPositions()

    ensureOpen();
    return new MultiTermPositions(subReaders, starts);
  
public org.apache.lucene.index.TermEnum terms()

    ensureOpen();
    return new MultiTermEnum(subReaders, starts, null);
  
public org.apache.lucene.index.TermEnum terms(org.apache.lucene.index.Term term)

    ensureOpen();
    return new MultiTermEnum(subReaders, starts, term);
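
The enumerator factories above return views that merge the subreaders behind the scenes. A hedged sketch that walks every term of the combined index together with its document frequency, with reader constructed as in the first example and Term/TermEnum imported from org.apache.lucene.index:

    TermEnum te = reader.terms();
    try {
      while (te.next()) {          // merged terms arrive in sorted order
        Term t = te.term();
        System.out.println(t.field() + ":" + t.text() + " df=" + te.docFreq());
      }
    } finally {
      te.close();
    }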