import java.io.StringReader;

import net.paoding.analysis.analyzer.PaodingAnalyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

public class TestPaoDing {
    public static void main(String[] args) throws Exception {
        // StandardAnalyzer: unigram (single-character) segmentation
        // Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_29);
        // PaodingAnalyzer: bigram segmentation
        Analyzer analyzer = new PaodingAnalyzer();
        String indexStr = "人之所以痛苦,在于追求错误的东西";
        StringReader reader = new StringReader(indexStr);
        TokenStream ts = analyzer.tokenStream(null, reader);
        // Fetch the term and type attributes once; the stream updates them
        // in place on every call to incrementToken().
        TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
        TypeAttribute typeAtt = (TypeAttribute) ts.getAttribute(TypeAttribute.class);
        while (ts.incrementToken()) {
            System.out.print(termAtt.term());
            System.out.print(' ');
            System.out.println(typeAtt.type());
        }
        ts.close();
    }
}
=======================================================
The same test using the older, pre-Lucene-2.9 Token API (ts.next() / termText()):
import java.io.StringReader;

import net.paoding.analysis.analyzer.PaodingAnalyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class TestPaoDing {
    public static void main(String[] args) throws Exception {
        // StandardAnalyzer: unigram (single-character) segmentation
        // Analyzer analyzer = new StandardAnalyzer();
        // PaodingAnalyzer: bigram segmentation
        Analyzer analyzer = new PaodingAnalyzer();
        String indexStr = "围城,我们都是好孩子";
        StringReader reader = new StringReader(indexStr);
        TokenStream ts = analyzer.tokenStream(null, reader);
        // next()/termText() belong to the old token API; they were removed in Lucene 3.0,
        // so this variant only compiles against older Lucene versions.
        Token t = ts.next();
        while (t != null) {
            System.out.print(t.termText() + " ");
            t = ts.next();
        }
        ts.close();
    }
}
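
Both variants assume the Paoding dictionary directory has already been configured. Below is a minimal setup sketch, assuming the "paoding.dic.home" system property is the way your Paoding build locates its dictionaries (it can usually also be supplied via a paoding-dic-home.properties file on the classpath or the PAODING_DIC_HOME environment variable); the path is a placeholder, adjust it to your installation:

import net.paoding.analysis.analyzer.PaodingAnalyzer;

import org.apache.lucene.analysis.Analyzer;

public class PaodingSetup {
    public static void main(String[] args) {
        // Assumption: point Paoding at its dictionary directory before creating the analyzer.
        // "/path/to/paoding/dic" is a placeholder for the dic/ folder of your install.
        System.setProperty("paoding.dic.home", "/path/to/paoding/dic");
        Analyzer analyzer = new PaodingAnalyzer();
        System.out.println("PaodingAnalyzer ready: " + analyzer);
    }
}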