Implementing Chinese Word Segmentation with Lucene

1. The IK jar used here is IKAnalyzer2012_u6.jar. It is not published to Maven Central, so search for it and download it yourself (e.g., via Baidu), then reference it in the project.
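
Because the jar is obtained manually, Maven cannot resolve it on its own. One option is a system-scoped dependency. This is a sketch only: it assumes the jar is copied into a lib/ folder in the project root, and the groupId/artifactId coordinates below are made-up labels, since the jar has no official Maven coordinates. Alternatively, install the jar into the local repository with mvn install:install-file and declare it like a normal dependency.

<dependency> <!-- assumed coordinates; adjust the path to wherever the jar lives -->
	<groupId>org.wltea</groupId>
	<artifactId>IKAnalyzer</artifactId>
	<version>2012_u6</version>
	<scope>system</scope>
	<systemPath>${project.basedir}/lib/IKAnalyzer2012_u6.jar</systemPath>
</dependency>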

2. Add the following dependencies to pom.xml:

<dependency> <!-- query parsing -->
	<groupId>org.apache.lucene</groupId>
	<artifactId>lucene-queryparser</artifactId>
	<version>6.0.0</version>
</dependency>
<dependency> <!-- Lucene's bundled smart Chinese analyzer -->
	<groupId>org.apache.lucene</groupId>
	<artifactId>lucene-analyzers-smartcn</artifactId>
	<version>6.0.0</version>
</dependency>
<dependency> <!-- common analyzers used in the test -->
	<groupId>org.apache.lucene</groupId>
	<artifactId>lucene-analyzers-common</artifactId>
	<version>6.0.0</version>
</dependency>
<dependency> <!-- Lucene core -->
	<groupId>org.apache.lucene</groupId>
	<artifactId>lucene-core</artifactId>
	<version>6.0.0</version>
</dependency>

3. Create IKAnalyzer6x.java and IKTokenizer6x.java

IKAnalyzer6x.java

package com.arno.ik;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;

public class IKAnalyzer6x extends Analyzer {

	private boolean useSmart;

	public boolean useSmart() {
		return useSmart;
	}

	public void setUseSmart(boolean useSmart) {
		this.useSmart = useSmart;
	}

	// IK implementation of the Lucene Analyzer interface;
	// defaults to the fine-grained segmentation algorithm
	public IKAnalyzer6x() {
		this(false);
	}

	public IKAnalyzer6x(boolean useSmart) {
		super();
		this.useSmart = useSmart;
	}

	// Override the Lucene 6.x createComponents(String) hook to
	// build the analysis chain from the IK-backed Tokenizer
	@Override
	protected TokenStreamComponents createComponents(String fieldName) {
		Tokenizer _IKTokenizer = new IKTokenizer6x(this.useSmart);
		return new TokenStreamComponents(_IKTokenizer);
	}

}
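
The useSmart flag is the only switch: false (the default) selects IK's fine-grained algorithm, which emits overlapping candidate words, while true selects smart mode, which outputs a single coarse-grained segmentation. Constructing one of each looks like this (illustrative sketch):

Analyzer fineGrained = new IKAnalyzer6x();     // fine-grained, the default
Analyzer smart = new IKAnalyzer6x(true);       // smart mode, coarser tokens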

IKTokenizer6x.java

package com.arno.ik;

import java.io.IOException;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class IKTokenizer6x extends Tokenizer {

	// the underlying IK segmenter
	private IKSegmenter _IKImplement;
	// term text attribute
	private final CharTermAttribute termAtt;
	// term offset attribute
	private final OffsetAttribute offsetAtt;
	// term type attribute
	private final TypeAttribute typeAtt;
	// end position of the last lexeme, used for the final offset
	private int endPosition;

	// constructor targeting the Lucene 6.x Tokenizer API
	public IKTokenizer6x(boolean useSmart) {
		super();
		offsetAtt = addAttribute(OffsetAttribute.class);
		termAtt = addAttribute(CharTermAttribute.class);
		typeAtt = addAttribute(TypeAttribute.class);
		_IKImplement = new IKSegmenter(input, useSmart);
	}

	@Override
	public final boolean incrementToken() throws IOException {
		// clear the attributes left over from the previous token
		clearAttributes();
		Lexeme nextLexeme = _IKImplement.next();
		if (nextLexeme != null) {
			// copy the lexeme into the Lucene attributes
			termAtt.append(nextLexeme.getLexemeText());
			termAtt.setLength(nextLexeme.getLength());
			offsetAtt.setOffset(nextLexeme.getBeginPosition(),
					nextLexeme.getEndPosition());
			// remember where the last lexeme ended
			endPosition = nextLexeme.getEndPosition();
			// use the lexeme's type string (not its text) as the token type
			typeAtt.setType(nextLexeme.getLexemeTypeString());
			return true; // more tokens may follow
		}
		return false; // no more tokens
	}

	@Override
	public void reset() throws IOException {
		super.reset();
		_IKImplement.reset(input);
	}

	@Override
	public final void end() throws IOException {
		super.end();
		int finalOffset = correctOffset(this.endPosition);
		offsetAtt.setOffset(finalOffset, finalOffset);
	}

}
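
IKTokenizer6x follows the standard TokenStream contract: call reset(), loop on incrementToken() until it returns false, then call end() and close(). A minimal sketch of driving the tokenizer directly (a hypothetical snippet assuming the imports already shown plus java.io.StringReader; the test class in step 4 goes through the Analyzer instead):

Tokenizer tokenizer = new IKTokenizer6x(true);
tokenizer.setReader(new StringReader("Lucene实现中文分词"));
CharTermAttribute term = tokenizer.getAttribute(CharTermAttribute.class);
OffsetAttribute offset = tokenizer.getAttribute(OffsetAttribute.class);
tokenizer.reset();                 // required before the first incrementToken()
while (tokenizer.incrementToken()) {
	System.out.println(term + " [" + offset.startOffset() + "-" + offset.endOffset() + "]");
}
tokenizer.end();                   // writes the final offset
tokenizer.close();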

Once both classes are created: if compilation errors appear, check that the four dependencies above have been resolved and that the downloaded IKAnalyzer2012_u6.jar is actually on the classpath.

4. Create five different analyzers and compare their segmentation results

package com.arno.test;

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import com.arno.ik.IKAnalyzer6x;

/*
 * Run the same string through different analyzers
 * and compare the resulting tokens.
 */
public class AnalyzerTest {

	// print every term the given analyzer produces for the given string
	public static void printResult(Analyzer analyzer, String str) throws Exception {
		// wrap the string in a Reader
		StringReader reader = new StringReader(str);
		// Analyzer is Lucene's analysis entry point; ask it for a TokenStream
		// ("test" is just a placeholder field name)
		TokenStream tokenStream = analyzer.tokenStream("test", reader);
		tokenStream.reset();
		// the term attribute holds the text of the current token
		CharTermAttribute attr = tokenStream.getAttribute(CharTermAttribute.class);
		// print the tokens one by one
		while (tokenStream.incrementToken()) {
			System.out.println(attr.toString());
		}
		// release the stream so the analyzer can be reused
		tokenStream.end();
		tokenStream.close();
	}

	public static void main(String[] args) throws Exception {
		// the analyzers under comparison
		Analyzer a1 = new SmartChineseAnalyzer();
		Analyzer a2 = new WhitespaceAnalyzer();
		Analyzer a3 = new SimpleAnalyzer();
		Analyzer a4 = new StandardAnalyzer();
		Analyzer a5 = new IKAnalyzer6x();
		String msg = "大部分年轻人,现在都喜欢在,业余时间玩王者荣耀";

		System.out.println("**********SmartChineseAnalyzer**********");
		printResult(a1, msg);
		System.out.println("**********IKAnalyzer**********");
		printResult(a5, msg);
		System.out.println("**********WhitespaceAnalyzer**********");
		printResult(a2, msg);
		System.out.println("**********SimpleAnalyzer**********");
		printResult(a3, msg);
		System.out.println("**********StandardAnalyzer**********");
		printResult(a4, msg);
	}
}
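
For the test sentence, the qualitative differences are what matter: SmartChineseAnalyzer and IKAnalyzer6x produce dictionary-based word tokens (IK's default fine-grained mode also emits overlapping sub-words, and out-of-vocabulary names such as 王者荣耀 may be split further unless added to a custom dictionary); WhitespaceAnalyzer returns the entire sentence as one token, since it splits only on whitespace and the string contains none; SimpleAnalyzer splits at the two fullwidth commas, producing three long tokens; StandardAnalyzer falls back to one token per CJK character.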

