Using IK Analyzer with Lucene

Special note: watch the versions. The IK Analyzer releases differ in subtle ways, and I was tripped up by exactly that with IK Analyzer 2012_U5 versus IK Analyzer 2012FF.

Version compatibility:

1. IK Analyzer 2012_U5 supports Lucene 3.2-3.6.

2. IK Analyzer 2012FF supports Lucene 4.0 and Solr 4.0. (What does FF mean? It simply stands for "For 4.0": the API and features are unchanged; this release only adds Lucene 4.0 and Solr 4.0 support so that those users can use IK as well.)

Download:

Google Code: http://code.google.com/p/ik-analyzer/downloads

Configuration:

Copy IKAnalyzer.cfg.xml, ext.dic (if you cannot find this file, create it by hand), and stopword.dic from the IKAnalyzer package into the root of the project's classpath. A sample IKAnalyzer.cfg.xml follows the file list below.

 

The project needs to include:

IKAnalyzer.cfg.xml

IKAnalyzer2012.jar

lucene-core-3.6.0.jar

stopword.dic
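
For reference, a typical IKAnalyzer.cfg.xml looks like the following; it registers the extension dictionary and the stopword dictionary by classpath-relative name (key names per the IK Analyzer 2012 distribution; adjust the file names if yours differ, and separate multiple dictionaries with semicolons):

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- extension dictionaries, semicolon-separated -->
    <entry key="ext_dict">ext.dic;</entry>
    <!-- extension stopword dictionaries -->
    <entry key="ext_stopwords">stopword.dic;</entry>
</properties>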

Tokenizing through the Lucene Analyzer interface (IKAnalyzer)

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class Test2 {
    public static void main(String[] args) throws IOException {
        String text = "基于java语言开发的轻量级的中文分词工具包";
        // create the analyzer; true enables smart (coarse-grained) segmentation
        Analyzer anal = new IKAnalyzer(true);
        StringReader reader = new StringReader(text);
        // tokenize through the Lucene TokenStream API
        TokenStream ts = anal.tokenStream("", reader);
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        // iterate over the tokens
        while (ts.incrementToken()) {
            System.out.print(term.toString() + "|");
        }
        reader.close();
        System.out.println();
    }
}
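
Note that under Lucene 4.x (i.e. with IK Analyzer 2012FF) the TokenStream contract is stricter: reset() must be called before the first incrementToken(), and end()/close() afterwards. A minimal sketch of the same loop against the Lucene 4.0 API (the class name Test2FF is made up):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class Test2FF {
    public static void main(String[] args) throws IOException {
        String text = "基于java语言开发的轻量级的中文分词工具包";
        Analyzer anal = new IKAnalyzer(true);
        TokenStream ts = anal.tokenStream("", new StringReader(text));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();                      // mandatory before the first incrementToken() in 4.x
        while (ts.incrementToken()) {
            System.out.print(term.toString() + "|");
        }
        ts.end();                        // finish consuming the stream
        ts.close();
        System.out.println();
    }
}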

 

Tokenizing with IKSegmenter directly (no Lucene required)

 

package com.haha.test;

import java.io.IOException;
import java.io.StringReader;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class Test3 {

    public static void main(String[] args) throws IOException {
        String text = "基于java语言开发的轻量级的中文分词工具包";
        StringReader sr = new StringReader(text);
        // second argument is the useSmart switch: true = smart mode, false = finest-grained
        IKSegmenter ik = new IKSegmenter(sr, true);
        Lexeme lex = null;
        // pull lexemes until the segmenter is exhausted
        while ((lex = ik.next()) != null) {
            System.out.print(lex.getLexemeText() + "|");
        }
    }
}
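
With useSmart set to false, IKSegmenter emits the finest-grained splits instead of the maximum-word-length ones. A small sketch that prints both modes side by side (the class name Test4 is made up; it reuses Test3's sample text):

import java.io.IOException;
import java.io.StringReader;

import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class Test4 {
    public static void main(String[] args) throws IOException {
        String text = "基于java语言开发的轻量级的中文分词工具包";
        // run the segmenter once in smart mode and once in finest-grained mode
        for (boolean smart : new boolean[]{true, false}) {
            IKSegmenter ik = new IKSegmenter(new StringReader(text), smart);
            StringBuilder sb = new StringBuilder(smart ? "smart : " : "finest: ");
            Lexeme lex;
            while ((lex = ik.next()) != null) {
                sb.append(lex.getLexemeText()).append('|');
            }
            System.out.println(sb);
        }
    }
}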

 

Running Test2 or Test3 produces:

基于|java|语言|开发|的|轻量级|的|中文|分词|工具包|
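
Tokenization is usually only half the story; the point of pairing IK with Lucene is indexing. Here is a minimal sketch of plugging IKAnalyzer into an IndexWriter under Lucene 3.6, matching the 2012_U5 compatibility range above (the index directory "index" and the field name "content" are illustrative):

import java.io.File;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IndexDemo {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new IKAnalyzer(true);               // IK does the tokenizing
        Directory dir = FSDirectory.open(new File("index"));    // illustrative index path
        IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        IndexWriter writer = new IndexWriter(dir, cfg);
        Document doc = new Document();
        // store the raw text and let IK analyze it for search
        doc.add(new Field("content", "基于java语言开发的轻量级的中文分词工具包",
                Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
        dir.close();
    }
}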



Appendix: full-text search code based on older releases (Lucene 2.3.2 + IKAnalyzer 3.2.8)

Note: this depends on lucene-core-2.3.2.jar and IKAnalyzer3.2.8.jar. These are older releases than the versions discussed above, so the API differs (e.g. IKSegmentation instead of IKSegmenter).

1. The LuceneUtil utility class:

package com.zcm.lucene;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

/**
 * Apache Lucene full-text search and IKAnalyzer segmentation utility class
 * Company: 91zcm.com
 * time: 2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneUtil {

    /** Path under which the index is created **/
    private static String LucenePath = "d://index";

    /**
     * Create the index.
     */
    public static int createIndex(List list) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        /** Note the last boolean parameter: true creates the index from scratch (overwriting an existing one) **/
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, true);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            Field fieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field fieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(fieldId);
            doc.add(fieldTitle);
            doc.add(fieldRemark);
            indexWriter.addDocument(doc);
        }
        /** How many documents the IndexWriter now holds **/
        int num = indexWriter.docCount();
        System.out.println("Total ------> " + num);
        indexWriter.optimize();
        indexWriter.close();
        return num;
    }

    /**
     * Segment a string with IKAnalyzer.
     */
    public static List tokenWord(String word) throws IOException {
        List tokenArr = new ArrayList();
        StringReader reader = new StringReader(word);
        /** When true, the segmenter splits by maximum word length (smart mode) **/
        IKSegmentation ik = new IKSegmentation(reader, true);
        Lexeme lexeme = null;
        while ((lexeme = ik.next()) != null) {
            tokenArr.add(lexeme.getLexemeText());
        }
        return tokenArr;
    }

    /**
     * Add to the index (single document).
     */
    public static void addIndex(LuceneVO vo) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        /** Add the document to the index **/
        Document doc = new Document();
        Field fieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
        Field fieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        Field fieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(fieldId);
        doc.add(fieldTitle);
        doc.add(fieldRemark);
        indexWriter.addDocument(doc);
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Add to the index (multiple documents).
     */
    public static void addIndexs(List list) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        /** Add the documents to the index **/
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            Field fieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field fieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(fieldId);
            doc.add(fieldTitle);
            doc.add(fieldRemark);
            indexWriter.addDocument(doc);
        }
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Update the index (single document).
     */
    public static void updateIndex(LuceneVO vo) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        Document doc = new Document();
        Field fieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
        Field fieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        Field fieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
        doc.add(fieldId);
        doc.add(fieldTitle);
        doc.add(fieldRemark);
        /** Replace the document whose aid matches **/
        Term term = new Term("aid", String.valueOf(vo.getAid()));
        indexWriter.updateDocument(term, doc);
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Update the index (multiple documents).
     */
    public static void updateIndexs(List list) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Document doc = new Document();
            Field fieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
            Field fieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(fieldId);
            doc.add(fieldTitle);
            doc.add(fieldRemark);
            /** Replace the document whose aid matches **/
            Term term = new Term("aid", String.valueOf(vo.getAid()));
            indexWriter.updateDocument(term, doc);
        }
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Delete from the index (single document).
     */
    public static void deleteIndex(LuceneVO vo) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        Term term = new Term("aid", String.valueOf(vo.getAid()));
        indexWriter.deleteDocuments(term);
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Delete from the index (multiple documents).
     */
    public static void deleteIndexs(List list) throws Exception {
        /** Location of the index files **/
        File indexDir = new File(LucenePath);
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
        /** Delete the matching documents **/
        for (int i = 0; i < list.size(); i++) {
            LuceneVO vo = (LuceneVO) list.get(i);
            Term term = new Term("aid", String.valueOf(vo.getAid()));
            indexWriter.deleteDocuments(term);
        }
        /** optimize() optimizes the index **/
        indexWriter.optimize();
        indexWriter.close();
    }

    /**
     * Search the index.
     */
    public static List search(String word) {
        List list = new ArrayList();
        Hits hits = null;
        try {
            IndexSearcher searcher = new IndexSearcher(LucenePath);
            String[] queries = {word, word};
            String[] fields = {"title", "remark"};
            BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
            Query query = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
            if (searcher != null) {
                /** the Hits result set **/
                hits = searcher.search(query);
                LuceneVO vo = null;
                for (int i = 0; i < hits.length(); i++) {
                    Document doc = hits.doc(i);
                    vo = new LuceneVO();
                    vo.setAid(Integer.parseInt(doc.get("aid")));
                    vo.setRemark(doc.get("remark"));
                    vo.setTitle(doc.get("title"));
                    list.add(vo);
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
        return list;
    }
}

2. The JavaBean used by Lucene:

package com.zcm.lucene;

/**
 * Bean used by the Apache Lucene full-text search
 * Company: 91zcm.com
 * time: 2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneVO {

    private Integer aid;    /** article ID **/
    private String title;   /** article title **/
    private String remark;  /** article abstract **/

    public Integer getAid() { return aid; }
    public void setAid(Integer aid) { this.aid = aid; }
    public String getTitle() { return title; }
    public void setTitle(String title) { this.title = title; }
    public String getRemark() { return remark; }
    public void setRemark(String remark) { this.remark = remark; }
}

Remark: the source comes from the full-text search code of the open-source blog at www.91zcm.com. (http://www.91zcm.com/)
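
For orientation, a hypothetical call sequence for the utility class above (all field values are made up, and the raw List usage matches the pre-generics style of the original code):

import java.util.ArrayList;
import java.util.List;

public class LuceneUtilDemo {
    public static void main(String[] args) throws Exception {
        // build one hypothetical record and index it
        LuceneVO vo = new LuceneVO();
        vo.setAid(1);
        vo.setTitle("IK Analyzer 和 lucene结合使用");
        vo.setRemark("轻量级的中文分词工具包");
        List list = new ArrayList();
        list.add(vo);
        LuceneUtil.createIndex(list);           // creates a fresh index under d://index
        // query the title and remark fields
        List results = LuceneUtil.search("分词");
        System.out.println("hits: " + results.size());
    }
}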