File: ParallelReader.java — Apache Lucene 1.9 — package org.apache.lucene.index
(11314 bytes, Mon Feb 20 09:20:14 GMT 2006)

ParallelReader

public class ParallelReader extends IndexReader
An IndexReader which reads multiple, parallel indexes. Each index added must have the same number of documents, but typically each contains different fields. Each document contains the union of the fields of all documents with the same document number. When searching, matches for a query term are from the first index added that has the field.

This is useful, e.g., with collections that have large fields which change rarely and small fields that change more frequently. The smaller fields may be re-indexed in a new index and both indexes may be searched together.

Warning: It is up to you to make sure all indexes are created and modified the same way. For example, if you add documents to one index, you need to add the same documents in the same order to the other indexes. Failure to do so will result in undefined behavior.

Fields Summary
private List readers
private SortedMap fieldToReader
private List storedFieldReaders
private int maxDoc
private int numDocs
private boolean hasDeletions
Constructors Summary
/**
 * Construct an (initially empty) ParallelReader; attach sub-indexes
 * with {@link #add(IndexReader)}.
 */
public ParallelReader() {
  super(null);  // no single underlying Directory backs this composite reader
}
Methods Summary
public voidadd(org.apache.lucene.index.IndexReader reader)
Add an IndexReader.

    add(reader, false);
  
public voidadd(org.apache.lucene.index.IndexReader reader, boolean ignoreStoredFields)
Add an IndexReader whose stored fields will not be returned. This can accellerate search when stored fields are only needed from a subset of the IndexReaders.

throws
IllegalArgumentException if not all indexes contain the same number of documents
throws
IllegalArgumentException if not all indexes have the same value of {@link IndexReader#maxDoc()}


    if (readers.size() == 0) {
      this.maxDoc = reader.maxDoc();
      this.numDocs = reader.numDocs();
      this.hasDeletions = reader.hasDeletions();
    }

    if (reader.maxDoc() != maxDoc)                // check compatibility
      throw new IllegalArgumentException
        ("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
    if (reader.numDocs() != numDocs)
      throw new IllegalArgumentException
        ("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
    
    Iterator i = reader.getFieldNames(IndexReader.FieldOption.ALL).iterator();
    while (i.hasNext()) {                         // update fieldToReader map
      String field = (String)i.next();
      if (fieldToReader.get(field) == null)
        fieldToReader.put(field, reader);
    }

    if (!ignoreStoredFields)
      storedFieldReaders.add(reader);             // add to storedFieldReaders
    readers.add(reader);
  
protected synchronized voiddoClose()

    for (int i = 0; i < readers.size(); i++)
      ((IndexReader)readers.get(i)).close();
  
protected voiddoCommit()

    for (int i = 0; i < readers.size(); i++)
      ((IndexReader)readers.get(i)).commit();
  
protected voiddoDelete(int n)

    for (int i = 0; i < readers.size(); i++) {
      ((IndexReader)readers.get(i)).doDelete(n);
    }
    hasDeletions = true;
  
protected voiddoSetNorm(int n, java.lang.String field, byte value)

    ((IndexReader)fieldToReader.get(field)).doSetNorm(n, field, value);
  
protected voiddoUndeleteAll()

    for (int i = 0; i < readers.size(); i++) {
      ((IndexReader)readers.get(i)).doUndeleteAll();
    }
    hasDeletions = false;
  
public intdocFreq(org.apache.lucene.index.Term term)

    return ((IndexReader)fieldToReader.get(term.field())).docFreq(term);
  
public org.apache.lucene.document.Documentdocument(int n)

    Document result = new Document();
    for (int i = 0; i < storedFieldReaders.size(); i++) {
      IndexReader reader = (IndexReader)storedFieldReaders.get(i);
      Enumeration fields = reader.document(n).fields();
      while (fields.hasMoreElements()) {
        result.add((Field)fields.nextElement());
      }
    }
    return result;
  
public java.util.CollectiongetFieldNames()

    return fieldToReader.keySet();
  
public java.util.CollectiongetFieldNames(boolean indexed)

    Set fieldSet = new HashSet();
    for (int i = 0; i < readers.size(); i++) {
      IndexReader reader = ((IndexReader)readers.get(i));
      Collection names = reader.getFieldNames(indexed);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public java.util.CollectiongetFieldNames(org.apache.lucene.index.IndexReader$FieldOption fieldNames)

    Set fieldSet = new HashSet();
    for (int i = 0; i < readers.size(); i++) {
      IndexReader reader = ((IndexReader)readers.get(i));
      Collection names = reader.getFieldNames(fieldNames);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public java.util.CollectiongetIndexedFieldNames(org.apache.lucene.document.Field$TermVector tvSpec)

    Set fieldSet = new HashSet();
    for (int i = 0; i < readers.size(); i++) {
      IndexReader reader = ((IndexReader)readers.get(i));
      Collection names = reader.getIndexedFieldNames(tvSpec);
      fieldSet.addAll(names);
    }
    return fieldSet;
  
public org.apache.lucene.index.TermFreqVectorgetTermFreqVector(int n, java.lang.String field)

    return ((IndexReader)fieldToReader.get(field)).getTermFreqVector(n, field);
  
public org.apache.lucene.index.TermFreqVector[]getTermFreqVectors(int n)

    ArrayList results = new ArrayList();
    Iterator i = fieldToReader.entrySet().iterator();
    while (i.hasNext()) {
      Map.Entry e = (Map.Entry)i.next();
      IndexReader reader = (IndexReader)e.getKey();
      String field = (String)e.getValue();
      TermFreqVector vector = reader.getTermFreqVector(n, field);
      if (vector != null)
        results.add(vector);
    }
    return (TermFreqVector[])
      results.toArray(new TermFreqVector[results.size()]);
  
public booleanhasDeletions()

 return hasDeletions; 
public booleanhasNorms(java.lang.String field)

    return ((IndexReader)fieldToReader.get(field)).hasNorms(field);
  
public booleanisDeleted(int n)

    if (readers.size() > 0)
      return ((IndexReader)readers.get(0)).isDeleted(n);
    return false;
  
public intmaxDoc()

 return maxDoc; 
public byte[]norms(java.lang.String field)

    return ((IndexReader)fieldToReader.get(field)).norms(field);
  
public voidnorms(java.lang.String field, byte[] result, int offset)

     ((IndexReader)fieldToReader.get(field)).norms(field, result, offset);
  
public intnumDocs()

 return numDocs; 
public org.apache.lucene.index.TermDocstermDocs(org.apache.lucene.index.Term term)

    return new ParallelTermDocs(term);
  
public org.apache.lucene.index.TermDocstermDocs()

    return new ParallelTermDocs();
  
public org.apache.lucene.index.TermPositionstermPositions(org.apache.lucene.index.Term term)

    return new ParallelTermPositions(term);
  
public org.apache.lucene.index.TermPositionstermPositions()

    return new ParallelTermPositions();
  
public org.apache.lucene.index.TermEnumterms()

    return new ParallelTermEnum();
  
public org.apache.lucene.index.TermEnumterms(org.apache.lucene.index.Term term)

    return new ParallelTermEnum(term);