String fieldName = "text";
// Content to be indexed
String text = "IK Analyzer是一个结合词典分词和文法分词的中文分词开源工具包。" +
        "它使用了全新的正向迭代最细粒度切分算法。";
// Instantiate the IK analyzer in smart-segmentation mode (true)
Analyzer analyzer = new IKAnalyzer(true);
Directory directory = null;
IndexWriter iwriter = null;
IndexReader ireader = null;
IndexSearcher isearcher = null;
try {
    // Build an in-memory index
    directory = new RAMDirectory();
    // Configure the IndexWriter
    IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_34, analyzer);
    iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    iwriter = new IndexWriter(directory, iwConfig);
    // Write a single document to the index
    Document doc = new Document();
    doc.add(new Field("ID", "10000", Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.add(new Field(fieldName, text, Field.Store.YES, Field.Index.ANALYZED));
    iwriter.addDocument(doc);
    iwriter.close();
    // Mark as closed so the finally block does not close it a second time
    iwriter = null;
    // Search phase **********************************
    // Instantiate the searcher
    ireader = IndexReader.open(directory);
    isearcher = new IndexSearcher(ireader);
    // NOTE(review): "number + Chinese numeric unit" keywords like this one
    // trigger the IK smart-mode NPE documented below — kept as the repro case.
    String keyword = " 3万";
    // Build the Query with QueryParser
    QueryParser qp = new QueryParser(Version.LUCENE_34, fieldName, analyzer);
    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
    Query query = qp.parse(keyword);
    // Retrieve the top 5 most relevant documents
    TopDocs topDocs = isearcher.search(query, 5);
    System.out.println("命中:" + topDocs.totalHits);
    // Print the results.
    // BUG FIX: iterate over scoreDocs.length, not totalHits — totalHits is the
    // total number of matches in the index, while only up to 5 ScoreDocs were
    // fetched; using totalHits throws ArrayIndexOutOfBoundsException when
    // more than 5 documents match.
    ScoreDoc[] scoreDocs = topDocs.scoreDocs;
    for (int i = 0; i < scoreDocs.length; i++) {
        Document targetDoc = isearcher.doc(scoreDocs[i].doc);
        System.out.println("内容:" + targetDoc.toString());
    }
} catch (CorruptIndexException e) {
    e.printStackTrace();
} catch (LockObtainFailedException e) {
    e.printStackTrace();
} catch (IOException e) {
    e.printStackTrace();
} catch (ParseException e) {
    e.printStackTrace();
} finally {
    // Release every resource independently; the original leaked the writer
    // (only closed inside try) and never closed the searcher.
    if (iwriter != null) {
        try {
            iwriter.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    if (isearcher != null) {
        try {
            isearcher.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    if (ireader != null) {
        try {
            ireader.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    if (directory != null) {
        try {
            directory.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
}
发现bug:
采用智能分词模式时,若 keyword 为"数字 + 中文数字单位(十、百、千、万、亿)"的形式(如上面代码中的 keyword " 3万"),QueryParser 解析时就会抛出空指针异常,堆栈如下:
Exception in thread "main" java.lang.NullPointerException
at org.wltea.analyzer.core.AnalyzeContext.compound(AnalyzeContext.java:382)
at org.wltea.analyzer.core.AnalyzeContext.getNextLexeme(AnalyzeContext.java:325)
at org.wltea.analyzer.core.IKSegmenter.next(IKSegmenter.java:156)
at org.wltea.analyzer.lucene.IKTokenizer.incrementToken(IKTokenizer.java:73)
at org.apache.lucene.analysis.CachingTokenFilter.fillCache(CachingTokenFilter.java:78)
at org.apache.lucene.analysis.CachingTokenFilter.incrementToken(CachingTokenFilter.java:50)
at org.apache.lucene.queryParser.QueryParser.getFieldQuery(QueryParser.java:606)
at org.apache.lucene.queryParser.QueryParser.Term(QueryParser.java:1429)
at org.apache.lucene.queryParser.QueryParser.Clause(QueryParser.java:1317)
at org.apache.lucene.queryParser.QueryParser.Query(QueryParser.java:1245)
at org.apache.lucene.queryParser.QueryParser.TopLevelQuery(QueryParser.java:1234)
at org.apache.lucene.queryParser.QueryParser.parse(QueryParser.java:206)
at org.wltea.analyzer.sample.IKAnalyzerDemo.main(IKAnalyzerDemo.java:57)