Methods Summary |
---|
private void | aquireWriteLock()Tries to acquire the WriteLock on this directory.
this method is only valid if this IndexReader is directory owner.
if (stale)
throw new IOException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
if (writeLock == null) {
Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
if (!writeLock.obtain(IndexWriter.WRITE_LOCK_TIMEOUT)) // obtain write lock
throw new IOException("Index locked for write: " + writeLock);
this.writeLock = writeLock;
// we have to check whether index has changed since this reader was opened.
// if so, this reader is no longer valid for deletion
if (SegmentInfos.readCurrentVersion(directory) > segmentInfos.getVersion()) {
stale = true;
this.writeLock.release();
this.writeLock = null;
throw new IOException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
}
}
|
public final synchronized void | close()Closes files associated with this index.
Also saves any new deletions to disk.
No other methods should be called after this has been called.
commit();
doClose();
if(closeDirectory)
directory.close();
|
protected final synchronized void | commit()Commit changes resulting from delete, undeleteAll, or setNorm operations
if(hasChanges){
if(directoryOwner){
synchronized (directory) { // in- & inter-process sync
new Lock.With(directory.makeLock(IndexWriter.COMMIT_LOCK_NAME),
IndexWriter.COMMIT_LOCK_TIMEOUT) {
public Object doBody() throws IOException {
doCommit();
segmentInfos.write(directory);
return null;
}
}.run();
}
if (writeLock != null) {
writeLock.release(); // release write lock
writeLock = null;
}
}
else
doCommit();
}
hasChanges = false;
|
public final synchronized void | deleteDocument(int docNum)Deletes the document numbered docNum . Once a document is
deleted it will not appear in TermDocs or TermPostitions enumerations.
Attempts to read its field with the {@link #document}
method will result in an error. The presence of this document may still be
reflected in the {@link #docFreq} statistic, though
this will be corrected eventually as the index is further modified.
if(directoryOwner)
aquireWriteLock();
doDelete(docNum);
hasChanges = true;
|
public final int | deleteDocuments(org.apache.lucene.index.Term term)Deletes all documents containing term .
This is useful if one uses a document field to hold a unique ID string for
the document. Then to delete such a document, one merely constructs a
term with the appropriate field and the unique ID string as its text and
passes it to this method.
See {@link #deleteDocument(int)} for information about when this deletion will
become effective.
TermDocs docs = termDocs(term);
if (docs == null) return 0;
int n = 0;
try {
while (docs.next()) {
deleteDocument(docs.doc());
n++;
}
} finally {
docs.close();
}
return n;
|
public org.apache.lucene.store.Directory | directory()Returns the directory this index resides in. return directory;
|
protected abstract void | doClose()Implements close.
|
protected abstract void | doCommit()Implements commit.
|
protected abstract void | doDelete(int docNum)Implements deletion of the document numbered docNum .
Applications should call {@link #deleteDocument(int)} or {@link #deleteDocuments(Term)}.
|
protected abstract void | doSetNorm(int doc, java.lang.String field, byte value)Implements setNorm in subclass.
|
protected abstract void | doUndeleteAll()Implements actual undeleteAll() in subclass.
|
public abstract int | docFreq(org.apache.lucene.index.Term t)Returns the number of documents containing the term t .
|
public abstract org.apache.lucene.document.Document | document(int n)Returns the stored fields of the n th
Document in this index.
|
protected void | finalize()Release the write lock, if needed.
if (writeLock != null) {
writeLock.release(); // release write lock
writeLock = null;
}
|
public static long | getCurrentVersion(java.lang.String directory)Reads version number from segments files. The version number is
initialized with a timestamp and then increased by one for each change of
the index.
return getCurrentVersion(new File(directory));
|
public static long | getCurrentVersion(java.io.File directory)Reads version number from segments files. The version number is
initialized with a timestamp and then increased by one for each change of
the index.
Directory dir = FSDirectory.getDirectory(directory, false);
long version = getCurrentVersion(dir);
dir.close();
return version;
|
public static long | getCurrentVersion(org.apache.lucene.store.Directory directory)Reads version number from segments files. The version number is
initialized with a timestamp and then increased by one for each change of
the index.
synchronized (directory) { // in- & inter-process sync
Lock commitLock=directory.makeLock(IndexWriter.COMMIT_LOCK_NAME);
boolean locked=false;
try {
locked=commitLock.obtain(IndexWriter.COMMIT_LOCK_TIMEOUT);
return SegmentInfos.readCurrentVersion(directory);
} finally {
if (locked) {
commitLock.release();
}
}
}
|
public abstract java.util.Collection | getFieldNames(org.apache.lucene.index.IndexReader$FieldOption fldOption)Get a list of unique field names that exist in this index and have the specified
field option information.
|
public abstract org.apache.lucene.index.TermFreqVector | getTermFreqVector(int docNumber, java.lang.String field)Return a term frequency vector for the specified document and field. The
returned vector contains terms and frequencies for the terms in
the specified field of this document, if the field had the storeTermVector
flag set. If termvectors had been stored with positions or offsets, a
TermPositionsVector is returned.
|
public abstract org.apache.lucene.index.TermFreqVector[] | getTermFreqVectors(int docNumber)Return an array of term frequency vectors for the specified document.
The array contains a vector for each vectorized field in the document.
Each vector contains terms and frequencies for all terms in a given vectorized field.
If no such fields existed, the method returns null. The term vectors that are
returned may either be of type TermFreqVector or of type TermPositionsVector if
positions or offsets have been stored.
|
public long | getVersion()Version number when this IndexReader was opened.
return segmentInfos.getVersion();
|
public abstract boolean | hasDeletions()Returns true if any documents have been deleted
|
public boolean | hasNorms(java.lang.String field)Returns true if there are norms stored for this field.
// backward compatible implementation.
// SegmentReader has an efficient implementation.
return norms(field) != null;
|
public static boolean | indexExists(java.lang.String directory)Returns true if an index exists at the specified directory.
If the directory does not exist or if there is no index in it.
false is returned.
return (new File(directory, IndexFileNames.SEGMENTS)).exists();
|
public static boolean | indexExists(java.io.File directory)Returns true if an index exists at the specified directory.
If the directory does not exist or if there is no index in it.
return (new File(directory, IndexFileNames.SEGMENTS)).exists();
|
public static boolean | indexExists(org.apache.lucene.store.Directory directory)Returns true if an index exists at the specified directory.
If the directory does not exist or if there is no index in it.
return directory.fileExists(IndexFileNames.SEGMENTS);
|
void | init(org.apache.lucene.store.Directory directory, org.apache.lucene.index.SegmentInfos segmentInfos, boolean closeDirectory, boolean directoryOwner)
this.directory = directory;
this.segmentInfos = segmentInfos;
this.directoryOwner = directoryOwner;
this.closeDirectory = closeDirectory;
|
public boolean | isCurrent()Check whether this IndexReader still works on a current version of the index.
If this is not the case you will need to re-open the IndexReader to
make sure you see the latest changes made to the index.
synchronized (directory) { // in- & inter-process sync
Lock commitLock=directory.makeLock(IndexWriter.COMMIT_LOCK_NAME);
boolean locked=false;
try {
locked=commitLock.obtain(IndexWriter.COMMIT_LOCK_TIMEOUT);
return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
} finally {
if (locked) {
commitLock.release();
}
}
}
|
public abstract boolean | isDeleted(int n)Returns true if document n has been deleted
|
public static boolean | isLocked(org.apache.lucene.store.Directory directory)Returns true iff the index in the named directory is
currently locked.
return
directory.makeLock(IndexWriter.WRITE_LOCK_NAME).isLocked() ||
directory.makeLock(IndexWriter.COMMIT_LOCK_NAME).isLocked();
|
public static boolean | isLocked(java.lang.String directory)Returns true iff the index in the named directory is
currently locked.
Directory dir = FSDirectory.getDirectory(directory, false);
boolean result = isLocked(dir);
dir.close();
return result;
|
public static long | lastModified(java.io.File directory)Returns the time the index in the named directory was last modified.
Do not use this to check whether the reader is still up-to-date, use
{@link #isCurrent()} instead.
return FSDirectory.fileModified(directory, IndexFileNames.SEGMENTS);
|
public static long | lastModified(org.apache.lucene.store.Directory directory)Returns the time the index in the named directory was last modified.
Do not use this to check whether the reader is still up-to-date, use
{@link #isCurrent()} instead.
return directory.fileModified(IndexFileNames.SEGMENTS);
|
public static long | lastModified(java.lang.String directory)Returns the time the index in the named directory was last modified.
Do not use this to check whether the reader is still up-to-date, use
{@link #isCurrent()} instead.
return lastModified(new File(directory));
|
public static void | main(java.lang.String[] args)Prints the filename and size of each file within a given compound file.
Add the -extract flag to extract files to the current working directory.
In order to make the extracted version of the index work, you have to copy
the segments file from the compound index into the directory where the extracted files are stored.
String filename = null;
boolean extract = false;
for (int i = 0; i < args.length; ++i) {
if (args[i].equals("-extract")) {
extract = true;
} else if (filename == null) {
filename = args[i];
}
}
if (filename == null) {
System.out.println("Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile>");
return;
}
Directory dir = null;
CompoundFileReader cfr = null;
try {
File file = new File(filename);
String dirname = file.getAbsoluteFile().getParent();
filename = file.getName();
dir = FSDirectory.getDirectory(dirname, false);
cfr = new CompoundFileReader(dir, filename);
String [] files = cfr.list();
Arrays.sort(files); // sort the array of filename so that the output is more readable
for (int i = 0; i < files.length; ++i) {
long len = cfr.fileLength(files[i]);
if (extract) {
System.out.println("extract " + files[i] + " with " + len + " bytes to local directory...");
IndexInput ii = cfr.openInput(files[i]);
FileOutputStream f = new FileOutputStream(files[i]);
// read and write with a small buffer, which is more effectiv than reading byte by byte
byte[] buffer = new byte[1024];
int chunk = buffer.length;
while(len > 0) {
final int bufLen = (int) Math.min(chunk, len);
ii.readBytes(buffer, 0, bufLen);
f.write(buffer, 0, bufLen);
len -= bufLen;
}
f.close();
ii.close();
}
else
System.out.println(files[i] + ": " + len + " bytes");
}
} catch (IOException ioe) {
ioe.printStackTrace();
}
finally {
try {
if (dir != null)
dir.close();
if (cfr != null)
cfr.close();
}
catch (IOException ioe) {
ioe.printStackTrace();
}
}
|
public abstract int | maxDoc()Returns one greater than the largest possible document number.
This may be used to, e.g., determine how big to allocate an array which
will have an element for every document number in an index.
|
public abstract byte[] | norms(java.lang.String field)Returns the byte-encoded normalization factor for the named field of
every document. This is used by the search code to score documents.
|
public abstract void | norms(java.lang.String field, byte[] bytes, int offset)Reads the byte-encoded normalization factor for the named field of every
document. This is used by the search code to score documents.
|
public abstract int | numDocs()Returns the number of documents in this index.
|
public static org.apache.lucene.index.IndexReader | open(java.lang.String path)Returns an IndexReader reading the index in an FSDirectory in the named
path.
return open(FSDirectory.getDirectory(path, false), true);
|
public static org.apache.lucene.index.IndexReader | open(java.io.File path)Returns an IndexReader reading the index in an FSDirectory in the named
path.
return open(FSDirectory.getDirectory(path, false), true);
|
public static org.apache.lucene.index.IndexReader | open(org.apache.lucene.store.Directory directory)Returns an IndexReader reading the index in the given Directory.
return open(directory, false);
|
private static org.apache.lucene.index.IndexReader | open(org.apache.lucene.store.Directory directory, boolean closeDirectory)
synchronized (directory) { // in- & inter-process sync
return (IndexReader)new Lock.With(
directory.makeLock(IndexWriter.COMMIT_LOCK_NAME),
IndexWriter.COMMIT_LOCK_TIMEOUT) {
public Object doBody() throws IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(directory);
if (infos.size() == 1) { // index is optimized
return SegmentReader.get(infos, infos.info(0), closeDirectory);
}
IndexReader[] readers = new IndexReader[infos.size()];
for (int i = 0; i < infos.size(); i++)
readers[i] = SegmentReader.get(infos.info(i));
return new MultiReader(directory, infos, closeDirectory, readers);
}
}.run();
}
|
public final synchronized void | setNorm(int doc, java.lang.String field, byte value)Expert: Resets the normalization factor for the named field of the named
document. The norm represents the product of the field's {@link
Field#setBoost(float) boost} and its {@link Similarity#lengthNorm(String,
int) length normalization}. Thus, to preserve the length normalization
values when resetting this, one should base the new value upon the old.
if(directoryOwner)
aquireWriteLock();
doSetNorm(doc, field, value);
hasChanges = true;
|
public void | setNorm(int doc, java.lang.String field, float value)Expert: Resets the normalization factor for the named field of the named
document.
setNorm(doc, field, Similarity.encodeNorm(value));
|
public org.apache.lucene.index.TermDocs | termDocs(org.apache.lucene.index.Term term)Returns an enumeration of all the documents which contain
term . For each document, the document number, the frequency of
the term in that document is also provided, for use in search scoring.
Thus, this method implements the mapping:
The enumeration is ordered by document number. Each document number
is greater than all that precede it in the enumeration.
TermDocs termDocs = termDocs();
termDocs.seek(term);
return termDocs;
|
public abstract org.apache.lucene.index.TermDocs | termDocs()Returns an unpositioned {@link TermDocs} enumerator.
|
public org.apache.lucene.index.TermPositions | termPositions(org.apache.lucene.index.Term term)Returns an enumeration of all the documents which contain
term . For each document, in addition to the document number
and frequency of the term in that document, a list of all of the ordinal
positions of the term in the document is available. Thus, this method
implements the mapping:
Term => <docNum, freq,
<pos1, pos2, ...
posfreq-1>
>*
This positional information faciliates phrase and proximity searching.
The enumeration is ordered by document number. Each document number is
greater than all that precede it in the enumeration.
TermPositions termPositions = termPositions();
termPositions.seek(term);
return termPositions;
|
public abstract org.apache.lucene.index.TermPositions | termPositions()Returns an unpositioned {@link TermPositions} enumerator.
|
public abstract org.apache.lucene.index.TermEnum | terms()Returns an enumeration of all the terms in the index.
The enumeration is ordered by Term.compareTo(). Each term
is greater than all that precede it in the enumeration.
|
public abstract org.apache.lucene.index.TermEnum | terms(org.apache.lucene.index.Term t)Returns an enumeration of all terms after a given term.
The enumeration is ordered by Term.compareTo(). Each term
is greater than all that precede it in the enumeration.
|
public final synchronized void | undeleteAll()Undeletes all documents currently marked as deleted in this index.
if(directoryOwner)
aquireWriteLock();
doUndeleteAll();
hasChanges = true;
|
public static void | unlock(org.apache.lucene.store.Directory directory)Forcibly unlocks the index in the named directory.
Caution: this should only be used by failure recovery code,
when it is known that no other process nor thread is in fact
currently accessing this index.
directory.makeLock(IndexWriter.WRITE_LOCK_NAME).release();
directory.makeLock(IndexWriter.COMMIT_LOCK_NAME).release();
|