Today's code came to fewer than 100 lines. I spent most of the day reading about parsing PDF, Word, and Excel files, and I finally understand how it works, but I haven't actually tried it yet; tomorrow I'll aim to write the code that indexes and searches those files. Below is the code for updating an index:
import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
public class MyUpdateIndexer {

    public static final String STORE_PATH = "E:/lucene_index";

    public static void updateIndexes(String field, String keyword)
            throws CorruptIndexException, IOException, ParseException {
        // First, delete the existing documents that match the given term.
        Directory dir = FSDirectory.open(new File(STORE_PATH));
        IndexReader reader = IndexReader.open(dir, false);
        Term term = new Term(field, keyword);
        reader.deleteDocuments(term);
        reader.close();

        // Then add the new documents to the index.
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
        // OpenMode.CREATE would rebuild the index from scratch and throw away the
        // documents we just kept, so open in CREATE_OR_APPEND mode instead.
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35, analyzer)
                .setOpenMode(OpenMode.CREATE_OR_APPEND);
        IndexWriter writer = new IndexWriter(dir, config);
        for (int i = 0; i < 100; i++) {
            Document doc = new Document();
            doc.add(new Field("title", "lucene title" + i, Field.Store.YES, Field.Index.ANALYZED));
            doc.add(new Field("content", "Apache Lucene(TM) is a high-performance", Field.Store.YES, Field.Index.ANALYZED));
            // To index a plain text file without reading its contents into a String
            // yourself, pass a Reader for the file: this Field constructor reads and
            // indexes the file contents but does not store them.
            //doc.add(new Field("path", new FileReader(new File("path/to/file"))));
            writer.addDocument(doc);
        }
        writer.close();
    }
}
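
For comparison, here is a minimal sketch of the same update done with IndexWriter.updateDocument(Term, Document), which Lucene 3.5 provides to delete every document matching a term and add the replacement in a single call, so the separate IndexReader pass above is not needed. The class name, method name, and field values below are just placeholders for illustration:

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class MyUpdateIndexer2 {

    public static void updateWithWriter(String field, String keyword) throws IOException {
        Directory dir = FSDirectory.open(new File("E:/lucene_index"));
        // The default open mode of IndexWriterConfig is CREATE_OR_APPEND.
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_35,
                new StandardAnalyzer(Version.LUCENE_35));
        IndexWriter writer = new IndexWriter(dir, config);

        // Build the replacement document.
        Document doc = new Document();
        doc.add(new Field("title", "lucene title updated", Field.Store.YES, Field.Index.ANALYZED));

        // updateDocument deletes all documents containing the term and then
        // adds the new document, so no separate IndexReader deletion is needed.
        writer.updateDocument(new Term(field, keyword), doc);
        writer.close();
    }
}

This also means the IndexReader never has to be opened with write access, and all changes go through a single IndexWriter.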