Tokenizing with a Lucene analyzer

package com.essearch.core.analyzer;

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;


// To write your own Chinese tokenizer, you can start from the Chinese tokenizer in the cn package of lucene-analyzers-common-4.10.2.jar and adapt it to your particular requirements. If we need to tokenize a document character by character, the core code looks like this:

public class MyNGramAnalyzer extends Analyzer {

	@Override
	protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
		// Emit every 1- to 15-character substring of the input as a token;
		// for Chinese text this effectively tokenizes character by character.
		NGramTokenizer nGramTokenizer = new NGramTokenizer(reader, 1, 15);
		// Lower-case the tokens so Latin-script terms match case-insensitively.
		TokenStream result = new LowerCaseFilter(nGramTokenizer);
		return new TokenStreamComponents(nGramTokenizer, result);
	}

	private static void testTokenizer(Tokenizer tokenizer) {
		try {
			// Obtain the attributes once, before consuming the stream;
			// addAttribute() returns the instance the tokenizer has already registered.
			CharTermAttribute charTermAttribute = tokenizer.addAttribute(CharTermAttribute.class);
			OffsetAttribute offsetAttribute = tokenizer.addAttribute(OffsetAttribute.class);
			PositionIncrementAttribute positionIncrementAttribute = tokenizer
					.addAttribute(PositionIncrementAttribute.class);
			PositionLengthAttribute positionLengthAttribute = tokenizer
					.addAttribute(PositionLengthAttribute.class);
			TypeAttribute typeAttribute = tokenizer.addAttribute(TypeAttribute.class);

			tokenizer.reset();
			while (tokenizer.incrementToken()) {
				System.out.println("term=" + charTermAttribute.toString() + ","
						+ offsetAttribute.startOffset() + "-"
						+ offsetAttribute.endOffset() + ",type="
						+ typeAttribute.type() + ",PositionIncrement="
						+ positionIncrementAttribute.getPositionIncrement()
						+ ",PositionLength="
						+ positionLengthAttribute.getPositionLength());
			}
			tokenizer.end();
			tokenizer.close();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) {
		// A Chinese sample sentence; the n-gram tokenizer splits it into
		// every 1- to 15-character substring.
		String s = "编码规范从根本上解决了程序维护员的难题;规范的编码阅读和理解起来更容易,也可以快速的不费力气的借鉴别人的编码。对将来维护你编码的人来说,你的编码越优化,他们就越喜欢你的编码,理解起来也就越快。";
		StringReader sr = new StringReader(s);

		// First exercise the tokenizer directly...
		NGramTokenizer nGramTokenizer = new NGramTokenizer(sr, 1, 15);
		testTokenizer(nGramTokenizer);

		// ...then the full analyzer (tokenizer + lower-case filter).
		Analyzer analyzer = new MyNGramAnalyzer();
		testAnalyzer(analyzer, s);
	}

	private static void testAnalyzer(Analyzer analyzer, String data) {
		TokenStream ts = null;
		try {
			ts = analyzer.tokenStream("myfield", new StringReader(data));
			// Offset attribute: start/end character positions of each token
			OffsetAttribute offset = ts.addAttribute(OffsetAttribute.class);
			// Term attribute: the token text
			CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
			// Type attribute: the token type
			TypeAttribute type = ts.addAttribute(TypeAttribute.class);
			ts.reset();
			// Iterate over the tokens produced by the analyzer
			while (ts.incrementToken()) {
				System.out.println(offset.startOffset() + "-" + offset.endOffset()
						+ ":" + term.toString() + "|" + type.type());
			}
			// Signal the end of the stream (also records the final offset)
			ts.end();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Release all resources held by the TokenStream
			if (ts != null) {
				try {
					ts.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}

	// Note: from Lucene 5.0 onward, Analyzer declares createComponents(String fieldName)
	// instead of createComponents(String fieldName, Reader reader); the tokenizer no longer
	// takes a Reader in its constructor and receives its input later via setReader().

}
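
For comparison, here is a minimal sketch of the same analyzer written against the Lucene 5.x API mentioned in the note above. It uses the same imports as the class above; the class name MyNGramAnalyzer5 is illustrative and not part of the original code.

public class MyNGramAnalyzer5 extends Analyzer {

	@Override
	protected TokenStreamComponents createComponents(String fieldName) {
		// In Lucene 5.x the tokenizer no longer takes a Reader; the Analyzer
		// framework supplies the input later via setReader().
		Tokenizer source = new NGramTokenizer(1, 15);
		TokenStream result = new LowerCaseFilter(source);
		return new TokenStreamComponents(source, result);
	}
}

Finally, a minimal sketch of the analyzer in use for indexing and searching, assuming the Lucene 4.10.x API used above; the field name, in-memory directory, and Version constant are illustrative assumptions rather than part of the original post. Because every 1- to 15-character substring is indexed, a plain TermQuery on the 2-gram 编码 matches the sample text.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class NGramIndexDemo {

	public static void main(String[] args) throws Exception {
		// In-memory index built with the custom n-gram analyzer
		Directory dir = new RAMDirectory();
		// Version constant assumed for 4.10.2; use the one matching your Lucene build
		IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_10_2, new MyNGramAnalyzer());
		IndexWriter writer = new IndexWriter(dir, config);

		Document doc = new Document();
		doc.add(new TextField("content", "编码规范从根本上解决了程序维护员的难题", Field.Store.YES));
		writer.addDocument(doc);
		writer.close();

		// The 2-gram "编码" was indexed as a term, so an exact TermQuery matches
		DirectoryReader reader = DirectoryReader.open(dir);
		IndexSearcher searcher = new IndexSearcher(reader);
		TopDocs hits = searcher.search(new TermQuery(new Term("content", "编码")), 10);
		System.out.println("hits=" + hits.totalHits);
		reader.close();
		dir.close();
	}
}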
