TestIndexWriterMerging.java (Apache Lucene 2.0.0, package org.apache.lucene.index, 3169 bytes, Fri May 26 09:54:12 BST 2006)

package org.apache.lucene.index;
/**
 * Copyright 2006 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import junit.framework.TestCase;

import java.io.IOException;


public class TestIndexWriterMerging extends TestCase
{

  /**
   * Tests that index merging (specifically addIndexes()) does not
   * change the order of documents in the index.
   */
  public void testLucene() throws IOException
  {

    int num = 100;

    Directory indexA = new RAMDirectory();
    Directory indexB = new RAMDirectory();
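    // two separate in-memory indexes: indexA will hold documents with
    // "count" values 0..99, indexB will hold 100..199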

    fillIndex(indexA, 0, num);
    boolean fail = verifyIndex(indexA, 0);
    assertFalse("Index a is invalid", fail);

    fillIndex(indexB, num, num);
    fail = verifyIndex(indexB, num);
    assertFalse("Index b is invalid", fail);

    Directory merged = new RAMDirectory();

    IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true);
    writer.setMergeFactor(2);
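    // a low merge factor makes the writer merge segments in small steps while
    // the two source indexes are combined, exercising the merge path under test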

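    // the merged index should contain indexA's documents (counts 0..99)
    // followed by indexB's (counts 100..199), in their original order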
    writer.addIndexes(new Directory[]{indexA, indexB});
    writer.close();

    fail = verifyIndex(merged, 0);
    merged.close();

    assertFalse("The merged index is invalid", fail);
  }

  private boolean verifyIndex(Directory directory, int startAt) throws IOException
  {
    boolean fail = false;
    IndexReader reader = IndexReader.open(directory);

    int max = reader.maxDoc();
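    // maxDoc() covers every document here because nothing is ever deleted,
    // so document numbers 0..max-1 are all live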
    for (int i = 0; i < max; i++)
    {
      Document temp = reader.document(i);
      // the stored "count" value should equal the document number it was given
      if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
      {
        fail = true;
        System.out.println("Document " + (i + startAt) + " is returning document " + temp.getField("count").stringValue());
      }
    }
    reader.close();
    return fail;
  }

  private void fillIndex(Directory dir, int start, int numDocs) throws IOException
  {

    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
    writer.setMergeFactor(2);
    writer.setMaxBufferedDocs(2);
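    // a tiny buffer and merge factor produce many small segments and frequent
    // merges even for a 100-document index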

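    // each document stores its absolute position in an untokenized "count"
    // field so that verifyIndex() can check ordering after merging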
    for (int i = start; i < (start + numDocs); i++)
    {
      Document temp = new Document();
      temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.UN_TOKENIZED));

      writer.addDocument(temp);
    }
    writer.close();
  }
}