import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.safety.Whitelist;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary.*;
import static java.lang.Math.log;
public class MySimHash {
    private String tokens;          // the source string
    private BigInteger strSimHash;  // simhash fingerprint of the string
    private int hashbits = 64;      // number of bits in the fingerprint

    public MySimHash() {
    }

    public MySimHash(String tokens) {
        this.tokens = tokens;
        this.strSimHash = this.simHash();
    }

    private MySimHash(String tokens, int hashbits) {
        this.tokens = tokens;
        this.hashbits = hashbits;
        this.strSimHash = this.simHash();
    }
    /**
     * Strips HTML tags and whitespace from the input.
     * @param content raw text, possibly containing HTML
     * @return the cleaned, lower-cased text
     */
    private String cleanResume(String content) {
        // If the input is HTML, this removes every HTML tag
        content = Jsoup.clean(content, Whitelist.none());
        content = StringUtils.lowerCase(content);
        String[] strings = {" ", "\n", "\r", "\t", "\\r", "\\n", "\\t"};
        for (String s : strings) {
            content = content.replaceAll(s, "");
        }
        return content;
    }
    /**
     * Computes the simhash fingerprint of the whole string.
     * @return the fingerprint as a BigInteger
     */
    private BigInteger simHash() {
        tokens = cleanResume(tokens); // cleanResume strips HTML tags and special characters
        int[] v = new int[this.hashbits];
        List<Term> termList = StandardTokenizer.segment(this.tokens); // segment the string with HanLP
        // Post-process the segmentation: weight terms by part of speech,
        // drop punctuation, and filter over-frequent words.
        Map<String, Integer> weightOfNature = new HashMap<String, Integer>(); // weight per part of speech
        weightOfNature.put("n", 2); // nouns get a weight of 2
        Map<String, String> stopNatures = new HashMap<String, String>(); // parts of speech to drop, e.g. punctuation
        stopNatures.put("w", ""); // "w" is HanLP's tag for punctuation