As mentioned in the previous post, Lucene is quite powerful out of the box, but unfortunately it has no official support for Chinese word segmentation, so a third-party analyzer is needed. Here I chose IK Analyzer for Chinese segmentation.
Chinese word segmentation (中文分词) means splitting a sequence of Chinese characters into individual words, that is, recombining a continuous character sequence into a word sequence according to certain rules. In English text, words are naturally delimited by spaces. In Chinese, characters, sentences, and paragraphs all have obvious delimiters, but words have no formal boundary marker at all. English has a similar problem at the phrase level, but at the word level Chinese is far more complex and far more difficult than English.
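To make this concrete, here is a minimal sketch (my addition, not from the original post; the class name `SegmentDemo` and the sample sentence are mine) that prints the tokens IK Analyzer produces for a short sentence, in both its finest-grained mode and its coarser "smart" mode. It assumes the same IK Analyzer 2012 / Lucene 4.x jars that the demo further below uses.

```java
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class SegmentDemo {
    public static void main(String[] args) throws IOException {
        String text = "中文分词工具包可以和lucene一起使用";
        printTokens(new IKAnalyzer(false), text); // finest-grained: more, shorter tokens
        printTokens(new IKAnalyzer(true), text);  // "smart" mode: fewer, longer tokens
    }

    static void printTokens(Analyzer analyzer, String text) throws IOException {
        TokenStream ts = analyzer.tokenStream("contents", text);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();                        // mandatory before the first incrementToken()
        while (ts.incrementToken()) {
            System.out.print(term + " | ");
        }
        ts.end();
        ts.close();
        System.out.println();
        analyzer.close();
    }
}
```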
The downloaded IK Analyzer package does not include the source code, so only the jar is used here; it worked fine in my tests. The code below is adapted, with minor modifications, from http://my.oschina.net/letiantian/blog/323887
```java
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKAnalyzerDemo {

    public static void main(String[] args) throws IOException {
        // Build the index
        String text1 = "IK Analyzer是一个结合词典分词和文法分词的中文分词开源工具包。它使用了全新的正向迭代最细粒度切分算法。";
        String text2 = "中文分词工具包可以和lucene是一起使用的";
        String text3 = "中文分词,你妹";
        String fieldName = "contents";
        Analyzer analyzer = new IKAnalyzer();
        RAMDirectory directory = new RAMDirectory();
        IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LATEST, analyzer);
        IndexWriter indexWriter = new IndexWriter(directory, writerConfig);

        // StringField is indexed as a single un-tokenized term, so it suits
        // exact-match fields such as ID; the searchable contents field must be
        // a TextField so that it goes through the analyzer.
        Document document1 = new Document();
        document1.add(new StringField("ID", "1", Field.Store.YES));
        document1.add(new TextField(fieldName, text1, Field.Store.YES));
        indexWriter.addDocument(document1);

        Document document2 = new Document();
        document2.add(new StringField("ID", "2", Field.Store.YES));
        document2.add(new TextField(fieldName, text2, Field.Store.YES));
        indexWriter.addDocument(document2);

        Document document3 = new Document();
        document3.add(new StringField("ID", "3", Field.Store.YES));
        document3.add(new TextField(fieldName, text3, Field.Store.YES));
        indexWriter.addDocument(document3);
        indexWriter.close();

        // Search
        DirectoryReader indexReader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(indexReader);
        String request = "中文分词";
        QueryParser parser = new QueryParser(fieldName, analyzer);
        parser.setDefaultOperator(QueryParser.AND_OPERATOR);
        try {
            Query query = parser.parse(request);
            TopDocs topDocs = searcher.search(query, 5);
            System.out.println("Hits: " + topDocs.totalHits);
            ScoreDoc[] docs = topDocs.scoreDocs;
            for (ScoreDoc doc : docs) {
                Document d = searcher.doc(doc.doc);
                System.out.println("Content: " + d.get(fieldName));
            }
        } catch (ParseException e) {
            e.printStackTrace();
        } finally {
            if (indexReader != null) {
                try {
                    indexReader.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            if (directory != null) {
                try {
                    directory.close();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
```
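A design note from me rather than from the post I adapted this from: the demo both indexes and queries with the default `new IKAnalyzer()`, which is the finest-grained mode. A common convention with IK is to split the two, fine-grained analysis at index time (so documents are reachable through more term variants) and smart mode, `new IKAnalyzer(true)`, at query time (so the query becomes a few long words instead of many fragments). Whether that split helps depends on your data, so treat it as something to experiment with rather than a rule.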