File: MultiReader.java
Doc: API Doc
Category: Apache Lucene 1.4.3
Size: 13059
Date: Mon May 17 14:56:48 BST 2004
Package: org.apache.lucene.index

MultiReader

public class MultiReader extends IndexReader
An IndexReader which reads multiple indexes, appending their content.
version
$Id: MultiReader.java,v 1.7 2004/05/17 12:56:47 goller Exp $
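
A minimal usage sketch (the index paths are placeholders; IOException handling omitted): two existing indexes opened and read through a single reader, with documents from the second index numbered after those of the first.

    IndexReader one = IndexReader.open("/path/to/index1");   // placeholder path
    IndexReader two = IndexReader.open("/path/to/index2");   // placeholder path
    IndexReader multi = new MultiReader(new IndexReader[] { one, two });
    // Documents 0 .. one.maxDoc()-1 come from "one"; higher numbers come
    // from "two", shifted down by one.maxDoc().
    System.out.println(multi.maxDoc());   // one.maxDoc() + two.maxDoc()
    multi.close();                        // also closes both subreaders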

Fields Summary
private IndexReader[]
subReaders
private int[]
starts
private Hashtable
normsCache
private int
maxDoc
private int
numDocs
private boolean
hasDeletions
Constructors Summary
public MultiReader(IndexReader[] subReaders)

Construct a MultiReader aggregating the named set of (sub)readers. Directory locking for delete, undeleteAll, and setNorm operations is left to the subreaders.

Note that all subreaders are closed if this MultiReader is closed.

param
subReaders set of (sub)readers
throws
IOException


    super(subReaders.length == 0 ? null : subReaders[0].directory());
    initialize(subReaders);
  
MultiReader(Directory directory, SegmentInfos sis, boolean closeDirectory, IndexReader[] subReaders)
Construct a MultiReader reading the named set of readers.

    super(directory, sis, closeDirectory);
    initialize(subReaders);
  
Methods Summary
protected synchronized void doClose()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].close();
  
protected void doCommit()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].commit();
  
protected void doDelete(int n)

    numDocs = -1;                             // invalidate cache
    int i = readerIndex(n);                   // find segment num
    subReaders[i].delete(n - starts[i]);      // dispatch to segment reader
    hasDeletions = true;
  
protected void doSetNorm(int n, java.lang.String field, byte value)

    normsCache.remove(field);                         // clear cache
    int i = readerIndex(n);                           // find segment num
    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
  
protected void doUndeleteAll()

    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].undeleteAll();
    hasDeletions = false;
  
public int docFreq(org.apache.lucene.index.Term t)

    int total = 0;          // sum freqs in segments
    for (int i = 0; i < subReaders.length; i++)
      total += subReaders[i].docFreq(t);
    return total;
  
public org.apache.lucene.document.Document document(int n)

    int i = readerIndex(n);                          // find segment num
    return subReaders[i].document(n - starts[i]);    // dispatch to segment reader
  
public java.util.Collection getFieldNames()

see
IndexReader#getFieldNames()

    // maintain a unique set of field names
    Set fieldSet = new HashSet();
    for (int i = 0; i < subReaders.length; i++) {
      IndexReader reader = subReaders[i];
      Collection names = reader.getFieldNames();
      // iterate through the field names and add them to the set
      for (Iterator iterator = names.iterator(); iterator.hasNext();) {
        String s = (String) iterator.next();
        fieldSet.add(s);
      }
    }
    return fieldSet;
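
The result is the union of field names across all subindexes. A small illustration (multi as in the earlier sketch; field names assumed):

    Collection fields = multi.getFieldNames();
    // If the first subindex has fields {"title", "body"} and the second has
    // {"body", "date"}, then fields contains {"title", "body", "date"}.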
  
public java.util.Collection getFieldNames(boolean indexed)

see
IndexReader#getFieldNames(boolean)

    // maintain a unique set of field names
    Set fieldSet = new HashSet();
    for (int i = 0; i < subReaders.length; i++) {
      IndexReader reader = subReaders[i];
      Collection names = reader.getFieldNames(indexed);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public java.util.Collection getIndexedFieldNames(boolean storedTermVector)

    // maintain a unique set of field names
    Set fieldSet = new HashSet();
    for (int i = 0; i < subReaders.length; i++) {
      IndexReader reader = subReaders[i];
      Collection names = reader.getIndexedFieldNames(storedTermVector);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public org.apache.lucene.index.TermFreqVector getTermFreqVector(int n, java.lang.String field)

    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVector(n - starts[i], field);
  
public org.apache.lucene.index.TermFreqVector[] getTermFreqVectors(int n)
Return an array of term frequency vectors for the specified document. The array contains a vector for each vectorized field in the document. Each vector contains the terms and term frequencies for all terms in a given vectorized field. If no such fields existed, the method returns null.

    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
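
A sketch of consuming the result (multi as in the earlier sketch; docNum is an assumed valid document number):

    TermFreqVector[] vectors = multi.getTermFreqVectors(docNum);
    if (vectors != null) {                  // null when nothing was vectorized
      for (int j = 0; j < vectors.length; j++) {
        String[] terms = vectors[j].getTerms();
        int[] freqs = vectors[j].getTermFrequencies();
        // terms[k] occurs freqs[k] times in field vectors[j].getField()
      }
    }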
  
public boolean hasDeletions()

 return hasDeletions; 
private void initialize(org.apache.lucene.index.IndexReader[] subReaders)

    this.subReaders = subReaders;
    starts = new int[subReaders.length + 1];    // build starts array
    for (int i = 0; i < subReaders.length; i++) {
      starts[i] = maxDoc;
      maxDoc += subReaders[i].maxDoc();      // compute maxDocs

      if (subReaders[i].hasDeletions())
        hasDeletions = true;
    }
    starts[subReaders.length] = maxDoc;
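
For illustration, with two subreaders whose maxDoc() values are 3 and 4 (assumed numbers):

    // starts == { 0, 3, 7 }, maxDoc == 7
    // The trailing entry (starts[length] == maxDoc) closes the last segment's
    // range so readerIndex() can binary-search starts as segment boundaries.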
  
public boolean isDeleted(int n)

    int i = readerIndex(n);                           // find segment num
    return subReaders[i].isDeleted(n - starts[i]);    // dispatch to segment reader
  
public int maxDoc()

    return maxDoc;
  
public synchronized byte[] norms(java.lang.String field)

    byte[] bytes = (byte[])normsCache.get(field);
    if (bytes != null)
      return bytes;          // cache hit

    bytes = new byte[maxDoc()];
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].norms(field, bytes, starts[i]);
    normsCache.put(field, bytes);      // update cache
    return bytes;
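
A sketch of decoding a merged norm; the field name and docNum are assumed, and decodeNorm comes from org.apache.lucene.search.Similarity:

    byte[] norms = multi.norms("contents");             // assumed field name
    float norm = Similarity.decodeNorm(norms[docNum]);  // byte -> float boost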
  
public synchronized void norms(java.lang.String field, byte[] result, int offset)

    byte[] bytes = (byte[])normsCache.get(field);
    if (bytes != null) {                          // cache hit
      System.arraycopy(bytes, 0, result, offset, maxDoc());
      return;                                     // skip re-reading the segments
    }

    for (int i = 0; i < subReaders.length; i++)      // read from segments
      subReaders[i].norms(field, result, offset + starts[i]);
  
public synchronized int numDocs()

    if (numDocs == -1) {        // check cache
      int n = 0;                // cache miss--recompute
      for (int i = 0; i < subReaders.length; i++)
        n += subReaders[i].numDocs();      // sum from readers
      numDocs = n;
    }
    return numDocs;
  
private int readerIndex(int n)

    // find reader for doc n:
    int lo = 0;                                      // search starts array
    int hi = subReaders.length - 1;                  // for first element less

    while (hi >= lo) {
      int mid = (lo + hi) >> 1;
      int midValue = starts[mid];
      if (n < midValue)
        hi = mid - 1;
      else if (n > midValue)
        lo = mid + 1;
      else {                                      // found a match
        while (mid+1 < subReaders.length && starts[mid+1] == midValue) {
          mid++;                                  // scan to last match
        }
        return mid;
      }
    }
    return hi;
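
Continuing the starts = {0, 3, 7} illustration from initialize():

    // readerIndex(0) == 0 and readerIndex(2) == 0 (first subreader);
    // readerIndex(3) == 1 and readerIndex(6) == 1 (local doc == n - starts[1]).
    // An empty subreader yields duplicate values in starts; the scan after an
    // exact match returns the last duplicate, skipping the empty reader.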
  
public org.apache.lucene.index.TermDocs termDocs()

    return new MultiTermDocs(subReaders, starts);
  
public org.apache.lucene.index.TermPositions termPositions()

    return new MultiTermPositions(subReaders, starts);
  
public org.apache.lucene.index.TermEnum terms()

    return new MultiTermEnum(subReaders, starts, null);
  
public org.apache.lucene.index.TermEnum terms(org.apache.lucene.index.Term term)

    return new MultiTermEnum(subReaders, starts, term);
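
A sketch of enumerating the merged term dictionary from a given starting term (multi as in the earlier sketch; field and text are assumed):

    TermEnum te = multi.terms(new Term("contents", "a"));  // first term >= ("contents","a")
    try {
      while (te.term() != null) {
        Term t = te.term();
        // docFreq() sums the document frequency reported by every subreader
        System.out.println(t.field() + ":" + t.text() + " df=" + multi.docFreq(t));
        if (!te.next()) break;
      }
    } finally {
      te.close();
    }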