Preface: To make the search engine both smart and efficient, I chose the Lucene search engine; however, its bundled SmartChineseAnalyzer does not segment Chinese well enough, so IKAnalyzer is used instead.
1. Installation
Download the IKAnalyzer2012_u6.jar file.
Modify the pom file:
<dependency>
    <groupId>com.janeluo</groupId>
    <artifactId>ikanalyzer</artifactId>
    <version>2012_u6</version>
</dependency>
2. Create IKAnalyzer6x.java and IKTokenizer6x.java
IKAnalyzer6x.java:
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;

public class IKAnalyzer6x extends Analyzer {
    private boolean useSmart;

    public boolean useSmart() {
        return useSmart;
    }

    public void setUseSmart(boolean useSmart) {
        this.useSmart = useSmart;
    }

    // IK implementation of Lucene's Analyzer interface;
    // defaults to the fine-grained segmentation algorithm
    public IKAnalyzer6x() {
        this(false);
    }

    public IKAnalyzer6x(boolean useSmart) {
        super();
        this.useSmart = useSmart;
    }

    // Override the current createComponents signature to build the analysis components
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer _IKTokenizer = new IKTokenizer6x(this.useSmart);
        return new TokenStreamComponents(_IKTokenizer);
    }
}
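The useSmart flag selects between IK's two modes: false (the default) is the fine-grained algorithm noted in the constructor comment, while true enables smart, coarse-grained segmentation. A minimal illustrative sketch of the ways to pick a mode (class name ModeDemo is just for demonstration):

import org.apache.lucene.analysis.Analyzer;

public class ModeDemo {
    public static void main(String[] args) {
        Analyzer fine = new IKAnalyzer6x();       // fine-grained segmentation (default)
        Analyzer smart = new IKAnalyzer6x(true);  // smart segmentation via the constructor
        IKAnalyzer6x toggled = new IKAnalyzer6x();
        toggled.setUseSmart(true);                // or switch an existing instance
    }
}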
IKTokenizer6x.java:
import java.io.IOException;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class IKTokenizer6x extends Tokenizer {
    // the IK segmenter implementation
    private IKSegmenter _IKImplement;
    // term text attribute
    private final CharTermAttribute termAtt;
    // term offset attribute
    private final OffsetAttribute offsetAtt;
    // term type attribute
    private final TypeAttribute typeAtt;
    // end position of the last term
    private int endPosition;

    // constructor targeting the current Tokenizer API
    public IKTokenizer6x(boolean useSmart) {
        super();
        offsetAtt = addAttribute(OffsetAttribute.class);
        termAtt = addAttribute(CharTermAttribute.class);
        typeAtt = addAttribute(TypeAttribute.class);
        // `input` is not readable yet; reset() hands the real Reader to the segmenter
        _IKImplement = new IKSegmenter(input, useSmart);
    }

    @Override
    public final boolean incrementToken() throws IOException {
        // clear all term attributes
        clearAttributes();
        Lexeme nextLexeme = _IKImplement.next();
        if (nextLexeme != null) {
            // copy the lexeme into the attributes
            termAtt.append(nextLexeme.getLexemeText());
            termAtt.setLength(nextLexeme.getLength());
            offsetAtt.setOffset(nextLexeme.getBeginPosition(),
                    nextLexeme.getEndPosition());
            // remember the end position of the last term
            endPosition = nextLexeme.getEndPosition();
            typeAtt.setType(nextLexeme.getLexemeTypeString());
            return true;  // another term is available
        }
        return false;     // no more terms
    }

    @Override
    public void reset() throws IOException {
        super.reset();
        _IKImplement.reset(input);
    }

    @Override
    public final void end() throws IOException {
        super.end();  // required by the TokenStream contract
        int finalOffset = correctOffset(this.endPosition);
        offsetAtt.setOffset(finalOffset, finalOffset);
    }
}
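With both classes in place, IKAnalyzer6x plugs into indexing like any other Lucene Analyzer. A minimal sketch (the index directory "indexdir", the field name "content", and the class name IndexDemo are illustrative placeholders, not part of the original setup):

import java.nio.file.Paths;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class IndexDemo {
    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("indexdir"));
        // fine-grained mode (the default) keeps more terms, which helps recall at index time
        IndexWriterConfig conf = new IndexWriterConfig(new IKAnalyzer6x());
        try (IndexWriter writer = new IndexWriter(dir, conf)) {
            Document doc = new Document();
            doc.add(new TextField("content", "采用CCD技术并如何降低机床的成本", Store.YES));
            writer.addDocument(doc);
        }
    }
}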
3. Testing the tokenizer
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import com.arno.ik.IKAnalyzer6x;

/*
 * Feed the same string to different analyzers
 * and print the segmentation results for comparison.
 */
public class AnalyzerTest {
    // static helper that prints every term produced by the analyzer
    public static void printResult(Analyzer analyzer, String str) throws Exception {
        // wrap str in a character input stream
        StringReader reader = new StringReader(str);
        // Analyzer is Lucene's analysis entry point; obtain a TokenStream from it
        TokenStream tokenStream = analyzer.tokenStream("test", reader);
        tokenStream.reset();
        // grab the term-text attribute, then print each term in turn
        CharTermAttribute attr = tokenStream.getAttribute(CharTermAttribute.class);
        while (tokenStream.incrementToken()) {
            System.out.println(attr.toString());
        }
        // finish and release the stream
        tokenStream.end();
        tokenStream.close();
    }

    public static void main(String[] args) throws Exception {
        Analyzer a = new IKAnalyzer6x();
        String msg = "采用CCD技术并如何降低机床的成本";
        System.out.println("**********IK Analyzer**********");
        printResult(a, msg);
    }
}
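Since the preface motivates IK as a replacement for the bundled SmartChineseAnalyzer, the same printResult helper lends itself to a side-by-side comparison. A minimal sketch, assuming the lucene-analyzers-smartcn artifact (same version as lucene-core) has also been added to the pom; the class name AnalyzerCompareTest is just for demonstration:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import com.arno.ik.IKAnalyzer6x;

public class AnalyzerCompareTest {
    public static void main(String[] args) throws Exception {
        String msg = "采用CCD技术并如何降低机床的成本";
        System.out.println("******SmartChineseAnalyzer******");
        AnalyzerTest.printResult(new SmartChineseAnalyzer(), msg);
        System.out.println("**********IK Analyzer**********");
        AnalyzerTest.printResult(new IKAnalyzer6x(), msg);
    }
}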