Methods Summary |
---|
public void | addDocument(org.apache.lucene.document.Document doc)Adds a document to this index. If the document contains more than
{@link #setMaxFieldLength(int)} terms for a given field, the remainder are
discarded.
addDocument(doc, null);
|
public void | addDocument(org.apache.lucene.document.Document doc, org.apache.lucene.analysis.Analyzer docAnalyzer)Adds a document to this index, using the provided analyzer instead of the
one specified in the constructor. If the document contains more than
{@link #setMaxFieldLength(int)} terms for a given field, the remainder are
discarded.
synchronized(directory) {
assureOpen();
createIndexWriter();
if (docAnalyzer != null)
indexWriter.addDocument(doc, docAnalyzer);
else
indexWriter.addDocument(doc);
}
|
protected void | assureOpen()Throw an IllegalStateException if the index is closed.
if (!open) {
throw new IllegalStateException("Index is closed");
}
|
public void | close()Close this index, writing all pending changes to disk.
synchronized(directory) {
if (!open)
throw new IllegalStateException("Index is closed already");
if (indexWriter != null) {
indexWriter.close();
indexWriter = null;
} else {
indexReader.close();
indexReader = null;
}
open = false;
}
|
protected void | createIndexReader()Close the IndexWriter and open an IndexReader.
if (indexReader == null) {
if (indexWriter != null) {
indexWriter.close();
indexWriter = null;
}
indexReader = IndexReader.open(directory);
}
|
protected void | createIndexWriter()Close the IndexReader and open an IndexWriter.
if (indexWriter == null) {
if (indexReader != null) {
indexReader.close();
indexReader = null;
}
indexWriter = new IndexWriter(directory, analyzer, false);
indexWriter.setInfoStream(infoStream);
indexWriter.setUseCompoundFile(useCompoundFile);
indexWriter.setMaxBufferedDocs(maxBufferedDocs);
indexWriter.setMaxFieldLength(maxFieldLength);
indexWriter.setMergeFactor(mergeFactor);
}
|
public void | deleteDocument(int docNum)Deletes the document numbered docNum .
synchronized(directory) {
assureOpen();
createIndexReader();
indexReader.deleteDocument(docNum);
}
|
public int | deleteDocuments(org.apache.lucene.index.Term term)Deletes all documents containing term .
This is useful if one uses a document field to hold a unique ID string for
the document. Then to delete such a document, one merely constructs a
term with the appropriate field and the unique ID string as its text and
passes it to this method. Returns the number of documents deleted.
synchronized(directory) {
assureOpen();
createIndexReader();
return indexReader.deleteDocuments(term);
}
|
public int | docCount()Returns the number of documents currently in this index.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
return indexWriter.docCount();
} else {
return indexReader.numDocs();
}
}
|
public void | flush()Make sure all changes are written to disk.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.close();
indexWriter = null;
createIndexWriter();
} else {
indexReader.close();
indexReader = null;
createIndexReader();
}
}
|
public java.io.PrintStream | getInfoStream()
synchronized(directory) {
assureOpen();
createIndexWriter();
return indexWriter.getInfoStream();
}
|
public int | getMaxBufferedDocs()
synchronized(directory) {
assureOpen();
createIndexWriter();
return indexWriter.getMaxBufferedDocs();
}
|
public int | getMaxFieldLength()
synchronized(directory) {
assureOpen();
createIndexWriter();
return indexWriter.getMaxFieldLength();
}
|
public int | getMergeFactor()
synchronized(directory) {
assureOpen();
createIndexWriter();
return indexWriter.getMergeFactor();
}
|
public boolean | getUseCompoundFile()
synchronized(directory) {
assureOpen();
createIndexWriter();
return indexWriter.getUseCompoundFile();
}
|
protected void | init(org.apache.lucene.store.Directory directory, org.apache.lucene.analysis.Analyzer analyzer, boolean create)Initialize an IndexWriter.
this.directory = directory;
synchronized(this.directory) {
this.analyzer = analyzer;
indexWriter = new IndexWriter(directory, analyzer, create);
open = true;
}
|
public void | optimize()Merges all segments together into a single segment, optimizing an index
for search.
synchronized(directory) {
assureOpen();
createIndexWriter();
indexWriter.optimize();
}
|
public void | setInfoStream(java.io.PrintStream infoStream)If non-null, information about merges and a message when
{@link #getMaxFieldLength()} is reached will be printed to this.
Example: index.setInfoStream(System.err);
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.setInfoStream(infoStream);
}
this.infoStream = infoStream;
}
|
public void | setMaxBufferedDocs(int maxBufferedDocs)Determines the minimal number of documents required before the buffered
in-memory documents are merged and a new segment is created.
Since Documents are merged in a {@link org.apache.lucene.store.RAMDirectory},
a larger value gives faster indexing. At the same time, mergeFactor limits
the number of files open in a FSDirectory.
The default value is 10.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.setMaxBufferedDocs(maxBufferedDocs);
}
this.maxBufferedDocs = maxBufferedDocs;
}
|
public void | setMaxFieldLength(int maxFieldLength)The maximum number of terms that will be indexed for a single field in a
document. This limits the amount of memory required for indexing, so that
collections with very large files will not crash the indexing process by
running out of memory.
Note that this effectively truncates large documents, excluding from the
index terms that occur further in the document. If you know your source
documents are large, be sure to set this value high enough to accommodate
the expected size. If you set it to Integer.MAX_VALUE, then the only limit
is your memory, but you should anticipate an OutOfMemoryError.
By default, no more than 10,000 terms will be indexed for a field.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.setMaxFieldLength(maxFieldLength);
}
this.maxFieldLength = maxFieldLength;
}
|
public void | setMergeFactor(int mergeFactor)Determines how often segment indices are merged by addDocument(). With
smaller values, less RAM is used while indexing, and searches on
unoptimized indices are faster, but indexing speed is slower. With larger
values, more RAM is used during indexing, and while searches on unoptimized
indices are slower, indexing is faster. Thus larger values (> 10) are best
for batch index creation, and smaller values (< 10) for indices that are
interactively maintained.
This must never be less than 2. The default value is 10.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.setMergeFactor(mergeFactor);
}
this.mergeFactor = mergeFactor;
}
|
public void | setUseCompoundFile(boolean useCompoundFile)Setting to turn on usage of a compound file. When on, multiple files
for each segment are merged into a single file once the segment creation
is finished. This is done regardless of what directory is in use.
synchronized(directory) {
assureOpen();
if (indexWriter != null) {
indexWriter.setUseCompoundFile(useCompoundFile);
}
this.useCompoundFile = useCompoundFile;
}
|
public java.lang.String | toString()
return "Index@" + directory;
|