Lucene & IKAnalyzer: A Getting-Started Example

I recently looked into IKAnalyzer and found it genuinely powerful, with thorough documentation and an easy learning curve, so I put together a quick example and am keeping it here for reference.


IKAnalyzer is a Chinese word-segmentation (analyzer) library; Lucene is a full-text search engine.

IKAnalyzer and IKSimilarity both extend Lucene's base classes directly (Analyzer and Similarity, respectively), while IKQueryParser is a new implementation class of IK's own; if you need to customize any of them, consult the IK source code.

You point Lucene at a dataset to build an index, then run queries against that index; Lucene automatically ranks the results by relevance. The IndexUtil class below walks through both steps.

When instantiating the IndexWriter you can pass a boolean argument that decides whether to overwrite the existing index or append to it. To update an entry, locate the document in the index, delete it, then add the modified version back.
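
For illustration, here is a minimal sketch of that delete-then-re-add pattern, written against the same Lucene 2.x/3.x-era API as the IndexUtil example below. The "sid" field name and index path are borrowed from that example; the method itself is mine. updateDocument() bundles the delete and the add into one call:

	// Sketch: update one document in place. Assumes the index already exists.
	public void updateDoc(String sid, Document newDoc) throws IOException {
		Analyzer analyzer = new IKAnalyzer(true);
		// false = append to / modify the existing index instead of overwriting it
		IndexWriter writer = new IndexWriter(FSDirectory.open(new File("./myindex")),
				analyzer, false, IndexWriter.MaxFieldLength.LIMITED);
		// Deletes every document whose "sid" term matches, then adds newDoc.
		// Note: for exact Term matching, "sid" is best indexed NOT_ANALYZED.
		writer.updateDocument(new Term("sid", sid), newDoc);
		writer.optimize();
		writer.close();
	}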

As for extension dictionaries, the documentation calls for a UTF-8 file without a BOM, and a beginner's tip claims you also need to leave a blank line. It is actually not that complicated: open EditPlus, create a UTF-8 (no BOM) file, and add one word or phrase per line; nothing else is required, not even a blank line.
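
Concretely, IK 3.x locates extension dictionaries through an IKAnalyzer.cfg.xml on the classpath root. A minimal sketch of the pair of files, where the file name ext.dic and the sample entries are placeholders of my own choosing (verify the keys against your IK version's documentation):

IKAnalyzer.cfg.xml:

	<?xml version="1.0" encoding="UTF-8"?>
	<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
	<properties>
		<comment>IK Analyzer extension configuration</comment>
		<!-- ext_dict: semicolon-separated extension dictionaries on the classpath -->
		<entry key="ext_dict">ext.dic;</entry>
	</properties>

ext.dic (UTF-8, no BOM, one entry per line, nothing else):

	体育场馆
	中秋晚会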


package com.ik;

import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;
import org.wltea.analyzer.lucene.IKAnalyzer;
import org.wltea.analyzer.lucene.IKQueryParser;
import org.wltea.analyzer.lucene.IKSimilarity;

public class IndexUtil {

	// Path where the index files are stored
	private final static String INDEX_PATH = "./myindex";

	public void createIndex() {

		// Instantiate the IKAnalyzer tokenizer (true = maximum-word-length segmentation)
		Analyzer analyzer = new IKAnalyzer(true);

		IndexWriter iwriter = null;

		try {
			// Create the IndexWriter (true = create a fresh index, overwriting any existing one)
			iwriter = new IndexWriter(FSDirectory.open(new File(INDEX_PATH)),
					analyzer, true, IndexWriter.MaxFieldLength.LIMITED);

			// DB is the author's own JDBC helper class (not shown here)
			DB db = DB.getInstance();
			db.setDbName("youtee");
			db.open();
			ResultSet rs = db.select("select * from stadium");

			while (rs.next()) {

				// Create a Document
				Document doc = new Document();

				// Add the fields
				doc.add(new Field("sid", rs.getString("sid"), Field.Store.YES,
						Field.Index.ANALYZED));

				doc.add(new Field("name", rs.getString("name"),
						Field.Store.YES, Field.Index.ANALYZED));

				doc.add(new Field("address", rs.getString("address"),
						Field.Store.YES, Field.Index.ANALYZED));

				doc.add(new Field("description", rs.getString("description"),
						Field.Store.YES, Field.Index.ANALYZED));

				iwriter.addDocument(doc);
			}

			// Optimize (merge) the index
			iwriter.optimize();

			// Close the IndexWriter
			iwriter.close();

		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} catch (SQLException e) {
			e.printStackTrace();
		}
	}

	public List<String> search(String keyword, String field, int returnNum) {
		IndexSearcher isearcher = null;
		List<String> resultList = new ArrayList<String>();

		try {
			// Instantiate the searcher (read-only)
			isearcher = new IndexSearcher(FSDirectory
					.open(new File(INDEX_PATH)), true);

			// Use the IKSimilarity relevance scorer on the searcher
			isearcher.setSimilarity(new IKSimilarity());

			// Alternative: let the IKQueryParser query analyzer build the Query
			// for the given field (fieldName -> the field to search):
			// Query query = IKQueryParser.parse(field, keyword);

			// Here we skip segmentation and query the entered keywords directly
			BooleanQuery aBooleanQuery = new BooleanQuery();

			// Split the input on whitespace and OR the terms together
			String[] qParts = keyword.split("\\s+");
			for (String q : qParts) {
				aBooleanQuery.add(new TermQuery(new Term(field, q)), Occur.SHOULD);
			}

			// Retrieve the top-n highest-scoring documents
			TopDocs topDocs = isearcher.search(aBooleanQuery, returnNum);

			// Clamp returnNum to the number of actual hits
			returnNum = returnNum <= topDocs.totalHits ? returnNum
					: topDocs.totalHits;

			System.out.println("Hits: " + topDocs.totalHits);

			// Collect the results
			ScoreDoc[] scoreDocs = topDocs.scoreDocs;

			for (int i = 0; i < returnNum; i++) {
				Document targetDoc = isearcher.doc(scoreDocs[i].doc);
				resultList.add(targetDoc.get(field));

				// System.out.println("内容:" + targetDoc.toString());
			}

		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			if (isearcher != null) {
				try {
					isearcher.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}

		return resultList;
	}

	public static void main(String[] args) {
		IndexUtil indexUtil = new IndexUtil();
//		 indexUtil.createIndex();

		List<String> list = indexUtil.search("体育 中秋", "address", 100);
		for (String s : list) {
			System.out.println(s);
		}
	}

	public void getSplitWord(String keyword) {
		Reader reader = new StringReader(keyword);
		// false = finest-grained segmentation (true would use maximum word length)
		IKSegmentation iks = new IKSegmentation(reader, false);

		try {
			Lexeme lexeme = null;
			do {
				lexeme = iks.next();
				System.out.println(lexeme != null ? lexeme.getLexemeText()
						: "++done++");
			} while (lexeme != null);

		} catch (IOException e) {
			System.out.println(e.getMessage());
		}
	}
}
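
As the commented-out line in search() hints, the manual TermQuery loop can be replaced by letting IKQueryParser do the segmentation. Below is a minimal sketch of a method that could be added to IndexUtil (the method name searchWithIk is mine; it reuses the class's INDEX_PATH and the imports above):

	// Sketch: a variant of search() that lets IKQueryParser segment the
	// keyword instead of building TermQuerys by hand.
	public List<String> searchWithIk(String keyword, String field, int returnNum)
			throws IOException {
		List<String> results = new ArrayList<String>();
		IndexSearcher isearcher = new IndexSearcher(
				FSDirectory.open(new File(INDEX_PATH)), true);
		isearcher.setSimilarity(new IKSimilarity());
		// IKQueryParser segments the keyword with IK and combines the
		// resulting terms into a single Query against the given field.
		Query query = IKQueryParser.parse(field, keyword);
		TopDocs topDocs = isearcher.search(query, returnNum);
		for (ScoreDoc sd : topDocs.scoreDocs) {
			results.add(isearcher.doc(sd.doc).get(field));
		}
		isearcher.close();
		return results;
	}

Pairing IKQueryParser with IKSimilarity on the searcher is the combination IK's own examples use.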



Note: the following version depends on lucene-core-2.3.2.jar and IKAnalyzer3.2.8.jar.

I. LuceneUtil utility class:

package com.zcm.lucene;

import java.io.File;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.wltea.analyzer.IKSegmentation;
import org.wltea.analyzer.Lexeme;

/**
 * Apache Lucene full-text search / IKAnalyzer segmentation utility class
 * Company: 91注册码
 * time: 2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneUtil {

	/** Path where the index is created **/
	private static String LucenePath = "d://index";

	/**
	 * Create the index
	 * @throws Exception
	 */
	public static int createIndex(List list) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		/** Note the final boolean argument: true = create anew (overwrites an existing index) **/
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, true);
		for (int i = 0; i < list.size(); i++) {
			LuceneVO vo = (LuceneVO) list.get(i);
			Document doc = new Document();
			Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
			Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			doc.add(FieldId);
			doc.add(FieldTitle);
			doc.add(FieldRemark);
			indexWriter.addDocument(doc);
		}
		/** Check how many documents the IndexWriter holds **/
		int num = indexWriter.docCount();
		System.out.println("Total ------> " + num);
		indexWriter.optimize();
		indexWriter.close();
		return num;
	}

	/**
	 * IKAnalyzer segmentation
	 * @param word
	 * @return
	 * @throws IOException
	 */
	public static List tokenWord(String word) throws IOException {
		List tokenArr = new ArrayList();
		StringReader reader = new StringReader(word);
		/** true = the segmenter uses maximum-word-length splitting **/
		IKSegmentation ik = new IKSegmentation(reader, true);
		Lexeme lexeme = null;
		while ((lexeme = ik.next()) != null) {
			tokenArr.add(lexeme.getLexemeText());
		}
		return tokenArr;
	}

	/**
	 * Add to the index (single)
	 * @param vo
	 * @throws Exception
	 */
	public static void addIndex(LuceneVO vo) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		/** Add the document to the index **/
		Document doc = new Document();
		Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
		Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
		Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
		doc.add(FieldId);
		doc.add(FieldTitle);
		doc.add(FieldRemark);
		indexWriter.addDocument(doc);
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Add to the index (batch)
	 * @param list
	 * @throws Exception
	 */
	public static void addIndexs(List list) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		/** Add the documents to the index **/
		for (int i = 0; i < list.size(); i++) {
			LuceneVO vo = (LuceneVO) list.get(i);
			Document doc = new Document();
			Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
			Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			doc.add(FieldId);
			doc.add(FieldTitle);
			doc.add(FieldRemark);
			indexWriter.addDocument(doc);
		}
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Update the index (single)
	 * @param vo
	 * @throws Exception
	 */
	public static void updateIndex(LuceneVO vo) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		/** Build the replacement document **/
		Document doc = new Document();
		Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
		Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
		Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
		doc.add(FieldId);
		doc.add(FieldTitle);
		doc.add(FieldRemark);
		Term term = new Term("aid", String.valueOf(vo.getAid()));
		indexWriter.updateDocument(term, doc);
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Update the index (batch)
	 * @param list
	 * @throws Exception
	 */
	public static void updateIndexs(List list) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		/** Replace the documents in the index **/
		for (int i = 0; i < list.size(); i++) {
			LuceneVO vo = (LuceneVO) list.get(i);
			Document doc = new Document();
			Field FieldId = new Field("aid", String.valueOf(vo.getAid()), Field.Store.YES, Field.Index.NO);
			Field FieldTitle = new Field("title", vo.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			Field FieldRemark = new Field("remark", vo.getRemark(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
			doc.add(FieldId);
			doc.add(FieldTitle);
			doc.add(FieldRemark);
			Term term = new Term("aid", String.valueOf(vo.getAid()));
			indexWriter.updateDocument(term, doc);
		}
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Delete from the index (single)
	 * @param vo
	 * @throws Exception
	 */
	public static void deleteIndex(LuceneVO vo) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		Term term = new Term("aid", String.valueOf(vo.getAid()));
		indexWriter.deleteDocuments(term);
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Delete from the index (batch)
	 * @param list
	 * @throws Exception
	 */
	public static void deleteIndexs(List list) throws Exception {
		/** Location of the index files **/
		File indexDir = new File(LucenePath);
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, false);
		/** Delete the index entries **/
		for (int i = 0; i < list.size(); i++) {
			LuceneVO vo = (LuceneVO) list.get(i);
			Term term = new Term("aid", String.valueOf(vo.getAid()));
			indexWriter.deleteDocuments(term);
		}
		/** optimize() merges and optimizes the index **/
		indexWriter.optimize();
		indexWriter.close();
	}

	/**
	 * Search the index
	 * @param word
	 * @return
	 */
	public static List search(String word) {
		List list = new ArrayList();
		Hits hits = null;
		try {
			IndexSearcher searcher = new IndexSearcher(LucenePath);
			String[] queries = { word, word };
			String[] fields = { "title", "remark" };
			BooleanClause.Occur[] flags = { BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };
			Query query = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
			if (searcher != null) {
				/** Hits result set **/
				hits = searcher.search(query);
				LuceneVO vo = null;
				for (int i = 0; i < hits.length(); i++) {
					Document doc = hits.doc(i);
					vo = new LuceneVO();
					vo.setAid(Integer.parseInt(doc.get("aid")));
					vo.setRemark(doc.get("remark"));
					vo.setTitle(doc.get("title"));
					list.add(vo);
				}
			}
		} catch (Exception ex) {
			ex.printStackTrace();
		}
		return list;
	}
}

II. The JavaBean used by Lucene:

package com.zcm.lucene;

/**
 * Bean used by the Apache Lucene full-text search example
 * Company: 91注册码
 * time: 2014-04-22
 * @author www.91zcm.com
 * @version 1.1
 */
public class LuceneVO {

	private Integer aid;   /** Article ID **/
	private String title;  /** Article title **/
	private String remark; /** Article summary **/

	public Integer getAid() {
		return aid;
	}
	public void setAid(Integer aid) {
		this.aid = aid;
	}
	public String getTitle() {
		return title;
	}
	public void setTitle(String title) {
		this.title = title;
	}
	public String getRemark() {
		return remark;
	}
	public void setRemark(String remark) {
		this.remark = remark;
	}
}

Note: the source code comes from the open-source full-text search code on the www.91zcm.com blog. (http://www.91zcm.com/)