File: FieldCacheImpl.java
Doc: API Doc
Category: Apache Lucene 1.9
Size: 12698
Date: Mon Feb 20 09:20:04 GMT 2006
Package: org.apache.lucene.search

FieldCacheImpl

public class FieldCacheImpl extends Object implements FieldCache
Expert: The default cache implementation, storing all values in memory. A WeakHashMap is used for storage.

Created: May 19, 2004 4:40:36 PM

author
Tim Jones (Nacimiento Software)
since
lucene 1.4
version
$Id: FieldCacheImpl.java 377502 2006-02-13 21:46:13Z yonik $
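
FieldCacheImpl is normally reached through the FieldCache.DEFAULT singleton rather than instantiated directly. A minimal usage sketch (the index path and the "price" field name are illustrative assumptions; the field should be indexed, untokenized, and numeric-looking for getFloats to make sense):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.FieldCache;

    public class FieldCacheUsage {
      public static void main(String[] args) throws IOException {
        // open a reader on an existing index (path is an illustrative assumption)
        IndexReader reader = IndexReader.open("/path/to/index");
        // the first call scans the field's terms and caches one float per document;
        // later calls for the same reader and field return the cached array
        float[] prices = FieldCache.DEFAULT.getFloats(reader, "price");
        System.out.println("doc 0 price: " + prices[0]);
        reader.close();
      }
    }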

Fields Summary
private static final IntParser INT_PARSER
private static final FloatParser FLOAT_PARSER
final Map cache
The internal cache. Maps Entry to array of interpreted term values.
Constructors Summary
Methods Summary
public java.lang.Object getAuto(org.apache.lucene.index.IndexReader reader, java.lang.String field)
// removed for java 1.3 compatibility:
// protected static final Object pFloats = Pattern.compile ("[0-9+\\-\\.eEfFdD]+");

    field = field.intern();
    Object ret = lookup (reader, field, SortField.AUTO);
    if (ret == null) {
      TermEnum enumerator = reader.terms (new Term (field, ""));
      try {
        Term term = enumerator.term();
        if (term == null) {
          throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
        }
        if (term.field() == field) {
          String termtext = term.text().trim();

          /**
           * Java 1.4 level code:

           if (pIntegers.matcher(termtext).matches())
           return IntegerSortedHitQueue.comparator (reader, enumerator, field);

           else if (pFloats.matcher(termtext).matches())
           return FloatSortedHitQueue.comparator (reader, enumerator, field);
           */

          // Java 1.3 level code:
          try {
            Integer.parseInt (termtext);
            ret = getInts (reader, field);
          } catch (NumberFormatException nfe1) {
            try {
              Float.parseFloat (termtext);
              ret = getFloats (reader, field);
            } catch (NumberFormatException nfe2) {
              ret = getStringIndex (reader, field);
            }
          }
          if (ret != null) {
            store (reader, field, SortField.AUTO, ret);
          }
        } else {
          throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
        }
      } finally {
        enumerator.close();
      }

    }
    return ret;
  
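Since getAuto returns an Object whose concrete type depends on what the first term looked like, a caller typically branches on the result. A sketch, reusing the reader from the earlier example and a hypothetical "sortField":

    Object values = FieldCache.DEFAULT.getAuto(reader, "sortField");
    if (values instanceof int[]) {
      int[] ints = (int[]) values;                      // first term parsed as an int
    } else if (values instanceof float[]) {
      float[] floats = (float[]) values;                // first term parsed as a float
    } else {
      FieldCache.StringIndex index = (FieldCache.StringIndex) values;  // fallback: string index
    }
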
public java.lang.Comparable[] getCustom(org.apache.lucene.index.IndexReader reader, java.lang.String field, org.apache.lucene.search.SortComparator comparator)

    field = field.intern();
    Object ret = lookup (reader, field, comparator);
    if (ret == null) {
      final Comparable[] retArray = new Comparable[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field, ""));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          Comparable termval = comparator.getComparable (term.text());
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }
      store (reader, field, comparator, retArray);
      return retArray;
    }
    return (Comparable[]) ret;
  
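A sketch of a custom SortComparator passed to getCustom; the length-based comparator and the "title" field name are illustrative assumptions, not part of this class:

    SortComparator byLength = new SortComparator() {
      // getComparable maps each term's text to the Comparable key cached per document
      protected Comparable getComparable(String termtext) {
        return new Integer(termtext.length());
      }
    };
    Comparable[] keys = FieldCache.DEFAULT.getCustom(reader, "title", byLength);
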
public float[] getFloats(org.apache.lucene.index.IndexReader reader, java.lang.String field)

    return getFloats(reader, field, FLOAT_PARSER);
  
public float[] getFloats(org.apache.lucene.index.IndexReader reader, java.lang.String field, FloatParser parser)

    field = field.intern();
    Object ret = lookup (reader, field, parser);
    if (ret == null) {
      final float[] retArray = new float[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field, ""));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          float termval = parser.parseFloat(term.text());
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }
      store (reader, field, parser, retArray);
      return retArray;
    }
    return (float[]) ret;
  
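A sketch of a custom FieldCache.FloatParser, here stripping a leading currency symbol before parsing; the parser and the "price" field are illustrative assumptions:

    FieldCache.FloatParser currencyParser = new FieldCache.FloatParser() {
      public float parseFloat(String value) {
        // accept values stored either as "9.99" or as "$9.99"
        return Float.parseFloat(value.startsWith("$") ? value.substring(1) : value);
      }
    };
    float[] prices = FieldCache.DEFAULT.getFloats(reader, "price", currencyParser);
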
public int[] getInts(org.apache.lucene.index.IndexReader reader, java.lang.String field)

    return getInts(reader, field, INT_PARSER);
  
public int[] getInts(org.apache.lucene.index.IndexReader reader, java.lang.String field, IntParser parser)

    field = field.intern();
    Object ret = lookup (reader, field, parser);
    if (ret == null) {
      final int[] retArray = new int[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field, ""));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          int termval = parser.parseInt(term.text());
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }
      store (reader, field, parser, retArray);
      return retArray;
    }
    return (int[]) ret;
  
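The int variant takes a FieldCache.IntParser in the same way; this sketch (the "quantity" field is an assumption) simply trims whitespace before parsing:

    FieldCache.IntParser trimmingParser = new FieldCache.IntParser() {
      public int parseInt(String value) {
        return Integer.parseInt(value.trim());
      }
    };
    int[] quantities = FieldCache.DEFAULT.getInts(reader, "quantity", trimmingParser);
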
public org.apache.lucene.search.FieldCache.StringIndex getStringIndex(org.apache.lucene.index.IndexReader reader, java.lang.String field)

    field = field.intern();
    Object ret = lookup (reader, field, STRING_INDEX);
    if (ret == null) {
      final int[] retArray = new int[reader.maxDoc()];
      String[] mterms = new String[reader.maxDoc()+1];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field, ""));
      int t = 0;  // current term number

      // an entry for documents that have no terms in this field
      // should a document with no terms be at top or bottom?
      // this puts them at the top - if it is changed, FieldDocSortedHitQueue
      // needs to change as well.
      mterms[t++] = null;

      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;

          // store term text
          // we expect that there is at most one term per document
          if (t >= mterms.length) throw new RuntimeException ("there are more terms than " +
                  "documents in field \"" + field + "\", but it's impossible to sort on " +
                  "tokenized fields");
          mterms[t] = term.text();

          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = t;
          }

          t++;
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }

      if (t == 0) {
        // if there are no terms, make the term array
        // have a single null entry
        mterms = new String[1];
      } else if (t < mterms.length) {
        // if there are less terms than documents,
        // trim off the dead array space
        String[] terms = new String[t];
        System.arraycopy (mterms, 0, terms, 0, t);
        mterms = terms;
      }

      StringIndex value = new StringIndex (retArray, mterms);
      store (reader, field, STRING_INDEX, value);
      return value;
    }
    return (StringIndex) ret;
  
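A sketch of reading a single document's value back out of the returned StringIndex (the "category" field is an assumption): order[doc] holds the position of the document's term within lookup, and position 0 is the null entry for documents with no term in the field.

    FieldCache.StringIndex index = FieldCache.DEFAULT.getStringIndex(reader, "category");
    int doc = 0;                                        // any document number below reader.maxDoc()
    String category = index.lookup[index.order[doc]];   // null when the document has no term
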
public java.lang.String[] getStrings(org.apache.lucene.index.IndexReader reader, java.lang.String field)

    field = field.intern();
    Object ret = lookup (reader, field, SortField.STRING);
    if (ret == null) {
      final String[] retArray = new String[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field, ""));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          String termval = term.text();
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }
      store (reader, field, SortField.STRING, retArray);
      return retArray;
    }
    return (String[]) ret;
  
java.lang.Object lookup(org.apache.lucene.index.IndexReader reader, java.lang.String field, int type)
See if an object is in the cache.

    Entry entry = new Entry (field, type);
    synchronized (this) {
      HashMap readerCache = (HashMap)cache.get(reader);
      if (readerCache == null) return null;
      return readerCache.get (entry);
    }
  
java.lang.Object lookup(org.apache.lucene.index.IndexReader reader, java.lang.String field, java.lang.Object comparer)
See if a custom object is in the cache.

    Entry entry = new Entry (field, comparer);
    synchronized (this) {
      HashMap readerCache = (HashMap)cache.get(reader);
      if (readerCache == null) return null;
      return readerCache.get (entry);
    }
  
java.lang.Object store(org.apache.lucene.index.IndexReader reader, java.lang.String field, int type, java.lang.Object value)
Put an object into the cache.

    Entry entry = new Entry (field, type);
    synchronized (this) {
      HashMap readerCache = (HashMap)cache.get(reader);
      if (readerCache == null) {
        readerCache = new HashMap();
        cache.put(reader,readerCache);
      }
      return readerCache.put (entry, value);
    }
  
java.lang.Object store(org.apache.lucene.index.IndexReader reader, java.lang.String field, java.lang.Object comparer, java.lang.Object value)
Put a custom object into the cache.

    Entry entry = new Entry (field, comparer);
    synchronized (this) {
      HashMap readerCache = (HashMap)cache.get(reader);
      if (readerCache == null) {
        readerCache = new HashMap();
        cache.put(reader, readerCache);
      }
      return readerCache.put (entry, value);
    }