Methods Summary |
---|
public void | add(org.apache.lucene.index.IndexReader reader)
Add an IndexReader.
add(reader, false);
|
public void | add(org.apache.lucene.index.IndexReader reader, boolean ignoreStoredFields)
Add an IndexReader whose stored fields will not be returned. This can
accelerate search when stored fields are only needed from a subset of
the IndexReaders.
if (readers.size() == 0) {
this.maxDoc = reader.maxDoc();
this.numDocs = reader.numDocs();
this.hasDeletions = reader.hasDeletions();
}
if (reader.maxDoc() != maxDoc) // check compatibility
throw new IllegalArgumentException
("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
if (reader.numDocs() != numDocs)
throw new IllegalArgumentException
("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
Iterator i = reader.getFieldNames(IndexReader.FieldOption.ALL).iterator();
while (i.hasNext()) { // update fieldToReader map
String field = (String)i.next();
if (fieldToReader.get(field) == null)
fieldToReader.put(field, reader);
}
if (!ignoreStoredFields)
storedFieldReaders.add(reader); // add to storedFieldReaders
readers.add(reader);
|
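The two add methods are normally called right after constructing the reader and before any searches are run. A minimal usage sketch, assuming this class is Lucene's ParallelReader (as the ParallelTermEnum/ParallelTermDocs helpers below suggest); the index paths are hypothetical and exception handling is omitted:

```java
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ParallelReader;
import org.apache.lucene.search.IndexSearcher;

// Both indexes must hold the same documents, added in the same order,
// so that document numbers (and maxDoc/numDocs) line up across readers.
ParallelReader parallel = new ParallelReader();
parallel.add(IndexReader.open("/index/stored"));     // stored fields will be returned
parallel.add(IndexReader.open("/index/aux"), true);  // stored fields ignored
IndexSearcher searcher = new IndexSearcher(parallel);
```

Passing true for ignoreStoredFields keeps a reader out of storedFieldReaders, so document(int) below never consults it.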
protected synchronized void | doClose()
for (int i = 0; i < readers.size(); i++)
((IndexReader)readers.get(i)).close();
|
protected void | doCommit()
for (int i = 0; i < readers.size(); i++)
((IndexReader)readers.get(i)).commit();
|
protected void | doDelete(int n)
for (int i = 0; i < readers.size(); i++) {
((IndexReader)readers.get(i)).doDelete(n);
}
hasDeletions = true;
|
protected void | doSetNorm(int n, java.lang.String field, byte value)
((IndexReader)fieldToReader.get(field)).doSetNorm(n, field, value);
|
protected void | doUndeleteAll()
for (int i = 0; i < readers.size(); i++) {
((IndexReader)readers.get(i)).doUndeleteAll();
}
hasDeletions = false;
|
public int | docFreq(org.apache.lucene.index.Term term)
return ((IndexReader)fieldToReader.get(term.field())).docFreq(term);
|
public org.apache.lucene.document.Document | document(int n)
Document result = new Document();
for (int i = 0; i < storedFieldReaders.size(); i++) {
IndexReader reader = (IndexReader)storedFieldReaders.get(i);
Enumeration fields = reader.document(n).fields();
while (fields.hasMoreElements()) {
result.add((Field)fields.nextElement());
}
}
return result;
|
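A short sketch of fetching the merged document; parallel is the ParallelReader from the sketch above and the document number is hypothetical:

```java
import java.util.Enumeration;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

int docId = 0;                              // hypothetical document number
Document doc = parallel.document(docId);    // fields merged from all storedFieldReaders
Enumeration fields = doc.fields();
while (fields.hasMoreElements()) {
  Field f = (Field) fields.nextElement();
  System.out.println(f.name() + ": " + f.stringValue());
}
```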
public java.util.Collection | getFieldNames()
return fieldToReader.keySet();
|
public java.util.Collection | getFieldNames(boolean indexed)
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));
Collection names = reader.getFieldNames(indexed);
fieldSet.addAll(names);
}
return fieldSet;
|
public java.util.Collection | getFieldNames(org.apache.lucene.index.IndexReader.FieldOption fieldNames)
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));
Collection names = reader.getFieldNames(fieldNames);
fieldSet.addAll(names);
}
return fieldSet;
|
public java.util.Collection | getIndexedFieldNames(org.apache.lucene.document.Field.TermVector tvSpec)
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));
Collection names = reader.getIndexedFieldNames(tvSpec);
fieldSet.addAll(names);
}
return fieldSet;
|
public org.apache.lucene.index.TermFreqVector | getTermFreqVector(int n, java.lang.String field)
return ((IndexReader)fieldToReader.get(field)).getTermFreqVector(n, field);
|
public org.apache.lucene.index.TermFreqVector[] | getTermFreqVectors(int n)
ArrayList results = new ArrayList();
Iterator i = fieldToReader.entrySet().iterator();
while (i.hasNext()) {
Map.Entry e = (Map.Entry)i.next();
String field = (String)e.getValue() instanceof String ? (String)e.getValue() : null; // placeholder removed below
TermFreqVector vector = reader.getTermFreqVector(n, field);
if (vector != null)
results.add(vector);
}
return (TermFreqVector[])
results.toArray(new TermFreqVector[results.size()]);
|
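A sketch of reading the collected vectors; only fields indexed with term vectors enabled contribute an entry, and parallel/docId are assumed as in the sketches above:

```java
import org.apache.lucene.index.TermFreqVector;

int docId = 0;                              // hypothetical document number
TermFreqVector[] vectors = parallel.getTermFreqVectors(docId);
for (int i = 0; i < vectors.length; i++) {
  String[] terms = vectors[i].getTerms();
  System.out.println(vectors[i].getField() + ": " + terms.length + " distinct terms");
}
```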
public boolean | hasDeletions()
return hasDeletions;
|
public boolean | hasNorms(java.lang.String field)
return ((IndexReader)fieldToReader.get(field)).hasNorms(field);
|
public boolean | isDeleted(int n)
if (readers.size() > 0)
return ((IndexReader)readers.get(0)).isDeleted(n);
return false;
|
public int | maxDoc()
return maxDoc;
|
public byte[] | norms(java.lang.String field)
return ((IndexReader)fieldToReader.get(field)).norms(field);
|
public void | norms(java.lang.String field, byte[] result, int offset)
((IndexReader)fieldToReader.get(field)).norms(field, result, offset);
|
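The array variant copies the norms for one field into a caller-supplied buffer starting at the given offset; a brief sketch with a hypothetical field name, using the parallel reader from the first example:

```java
byte[] buf = new byte[parallel.maxDoc()];
parallel.norms("title", buf, 0);            // fills buf[0 .. maxDoc()-1]
```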
public int | numDocs()
return numDocs;
|
public org.apache.lucene.index.TermDocs | termDocs(org.apache.lucene.index.Term term)
return new ParallelTermDocs(term);
|
public org.apache.lucene.index.TermDocs | termDocs()
return new ParallelTermDocs();
|
public org.apache.lucene.index.TermPositions | termPositions(org.apache.lucene.index.Term term)
return new ParallelTermPositions(term);
|
public org.apache.lucene.index.TermPositions | termPositions()
return new ParallelTermPositions();
|
public org.apache.lucene.index.TermEnum | terms()
return new ParallelTermEnum();
|
public org.apache.lucene.index.TermEnum | terms(org.apache.lucene.index.Term term)
return new ParallelTermEnum(term);
|
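The term-level methods return the ParallelTermEnum, ParallelTermDocs, and ParallelTermPositions wrappers over the underlying readers. A sketch that enumerates every term visible through the parallel view and counts its documents, with parallel assumed as in the first example:

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;

TermEnum terms = parallel.terms();          // iterate all terms in the parallel view
while (terms.next()) {
  Term term = terms.term();
  TermDocs docs = parallel.termDocs(term);
  int count = 0;
  while (docs.next()) count++;              // count matching documents
  docs.close();
  System.out.println(term + " occurs in " + count + " documents");
}
terms.close();
```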