Methods Summary
---
private void | addDoc(org.apache.lucene.index.IndexWriter iw, int i)
Document d = new Document();
Fieldable f;
int scoreAndID = i+1;
f = new Field(ID_FIELD,id2String(scoreAndID),Field.Store.YES,Field.Index.UN_TOKENIZED); // for debug purposes
f.setOmitNorms(true);
d.add(f);
f = new Field(TEXT_FIELD,"text of doc"+scoreAndID+textLine(i),Field.Store.NO,Field.Index.TOKENIZED); // for regular search
f.setOmitNorms(true);
d.add(f);
f = new Field(INT_FIELD,""+scoreAndID,Field.Store.NO,Field.Index.UN_TOKENIZED); // for function scoring
f.setOmitNorms(true);
d.add(f);
f = new Field(FLOAT_FIELD,scoreAndID+".000",Field.Store.NO,Field.Index.UN_TOKENIZED); // for function scoring
f.setOmitNorms(true);
d.add(f);
iw.addDocument(d);
log("added: "+d);
|
protected float | expectedFieldScore(java.lang.String docIDFieldVal)
return Float.parseFloat(docIDFieldVal.substring(2));
|
protected java.lang.String | id2String(int scoreAndID)
String s = "000000000"+scoreAndID;
int n = (""+N_DOCS).length() + 3;
int k = s.length() - n;
return "ID"+s.substring(k);
|
protected void | log(java.lang.Object o)
if (DBG) {
System.out.println(o.toString());
}
|
protected void | setUp()
// prepare a small index with just a few documents.
super.setUp();
dir = new RAMDirectory();
anlzr = new StandardAnalyzer();
IndexWriter iw = new IndexWriter(dir,anlzr);
// add docs not exactly in natural ID order, to verify we do check the order of docs by scores
int remaining = N_DOCS;
boolean done[] = new boolean[N_DOCS];
int i = 0;
while (remaining>0) {
if (done[i]) {
throw new Exception("to set this test correctly N_DOCS="+N_DOCS+" must be primary and greater than 2!");
}
addDoc(iw,i);
done[i] = true;
i = (i+4)%N_DOCS;
remaining --;
}
iw.close();
|
protected void | tearDown()
super.tearDown();
dir = null;
anlzr = null;
|
private java.lang.String | textLine(int docNum)
return DOC_TEXT_LINES[docNum % DOC_TEXT_LINES.length];
|