Word frequency counting in Java based on Lucene

The class below reads a UTF-8 text file, segments it with the je-analysis MMAnalyzer (a Chinese segmenter driven through Lucene's TokenStream interface), and prints how often each token occurs.

```java
package demo.analysis;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Map;

import jeasy.analysis.MMAnalyzer;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class Segment {

    public static void main(String[] args) throws IOException {
        Segment s = new Segment();
        String text = s.readFileByBufferedReader("./1.txt");
        // System.out.println(text);
        s.getWordByReader(text);
    }

    // Read the whole file as UTF-8 text, line by line.
    public String readFileByBufferedReader(String readFileName) {
        StringBuilder temp = new StringBuilder();
        File f = new File(readFileName);
        try {
            InputStreamReader read = new InputStreamReader(new FileInputStream(f), "utf-8");
            BufferedReader reader = new BufferedReader(read);
            String line;
            while ((line = reader.readLine()) != null) {
                temp.append(line).append("\n");
            }
            reader.close();
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        } catch (FileNotFoundException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return temp.toString();
    }

    // Segment the text with MMAnalyzer and count how often each token occurs.
    public void getWordByReader(String text) {
        // MMAnalyzer (je-analysis) implements forward-maximum-matching Chinese segmentation.
        MMAnalyzer analyzer = new MMAnalyzer();
        // Register a user-dictionary entry (the place name 任家坪) so it stays one token.
        analyzer.addWord("任家坪");
        Map<String, Integer> map = new HashMap<String, Integer>();
        try {
            System.out.println("Length = " + text.length());
            Reader r = new StringReader(text);
            TokenStream ts = analyzer.tokenStream(null, r);
            System.out.println("Start segmenting...\n");
            long begin = System.currentTimeMillis();
            // Old (Lucene 2.x) token iteration: ts.next() returns null at end of stream.
            for (Token t = ts.next(); t != null; t = ts.next()) {
                // System.out.println(t.startOffset() + " - " + t.endOffset() + " = " + t.termText());
                String str = t.termText();
                Integer count = map.get(str);
                map.put(str, count == null ? 1 : count + 1);
            }
            long end = System.currentTimeMillis();
            for (Map.Entry<String, Integer> entry : map.entrySet()) {
                System.out.println(entry.getKey() + ":" + entry.getValue());
            }
            System.out.println("Distinct tokens: " + map.size() + ", segmentation time: " + (end - begin) + "ms");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    // Alternative: let MMAnalyzer print the segmentation directly, one token per line.
    public void getWordBySegment(String text) {
        MMAnalyzer analyzer = new MMAnalyzer();
        try {
            System.out.println(analyzer.segment(text, "\n"));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
```
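Note that the listing above targets the old Lucene 2.x API: `TokenStream.next()` and `Token.termText()` were removed in Lucene 3.0, and je-analysis's MMAnalyzer is no longer maintained. As a minimal sketch of the same frequency count on a current Lucene, assuming a recent release (7+) with the lucene-analyzers-smartcn module on the classpath and substituting SmartChineseAnalyzer for MMAnalyzer (the sample sentence and class name are arbitrary stand-ins):

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ModernSegment {

    // Count token frequencies with the attribute-based TokenStream API (Lucene 3.0+).
    public static Map<String, Integer> countTokens(Analyzer analyzer, String text) throws IOException {
        Map<String, Integer> counts = new HashMap<>();
        // tokenStream() takes a field name (unused here) and the text to analyze.
        try (TokenStream ts = analyzer.tokenStream("content", text)) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();                     // required before the first incrementToken()
            while (ts.incrementToken()) {
                counts.merge(term.toString(), 1, Integer::sum);
            }
            ts.end();                       // required after the last token
        }
        return counts;
    }

    public static void main(String[] args) throws IOException {
        // SmartChineseAnalyzer plays the role MMAnalyzer played above.
        Analyzer analyzer = new SmartChineseAnalyzer();
        Map<String, Integer> counts = countTokens(analyzer, "任家坪位于四川省北川县。任家坪是一个地名。");
        counts.forEach((token, n) -> System.out.println(token + ":" + n));
    }
}
```

The `reset()`/`end()`/`close()` calls are part of the modern TokenStream contract; on recent Lucene versions, skipping `reset()` makes the stream fail rather than silently return no tokens.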

