MySimHash:
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import org.apache.commons.lang3.StringUtils;
import org.jsoup.Jsoup;
import org.jsoup.safety.Whitelist;
import java.math.BigInteger;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class MySimHash {

    private String tokens;         // the input string
    private BigInteger strSimHash; // the string's SimHash fingerprint
    private int hashbits = 64;     // number of bits in the fingerprint

    public MySimHash(String tokens) {
        this.tokens = tokens;
        this.strSimHash = this.simHash();
    }

    MySimHash(String tokens, int hashbits) {
        this.tokens = tokens;
        this.hashbits = hashbits;
        this.strSimHash = this.simHash();
    }
    /**
     * Strip HTML tags and whitespace from the input.
     * @param content the raw text, possibly containing HTML
     * @return the cleaned, lower-cased text
     */
    private String cleanResume(String content) {
        // If the input is HTML, this removes every HTML tag.
        // Note: newer jsoup versions rename Whitelist to Safelist.
        content = Jsoup.clean(content, Whitelist.none());
        content = StringUtils.lowerCase(content);
        // Remove whitespace characters and their literal escape sequences.
        // Use replace() (literal) rather than replaceAll() (regex), so the
        // two-character sequences "\\r", "\\n", "\\t" are matched as written.
        String[] strings = {" ", "\n", "\r", "\t", "\\r", "\\n", "\\t"};
        for (String s : strings) {
            content = content.replace(s, "");
        }
        return content;
    }
    /**
     * Compute the SimHash fingerprint of the whole string.
     * @return the fingerprint as a BigInteger
     */
    private BigInteger simHash() {
        tokens = cleanResume(tokens); // cleanResume strips HTML and whitespace
        int[] v = new int[this.hashbits];
        List<Term> termList = StandardTokenizer.segment(this.tokens); // segment the string into terms
        // Post-process the segmentation: weight terms by part of speech,
        // filter out punctuation, and cap over-frequent terms.
        Map<String, Integer> weightOfNature = new HashMap<>(); // part-of-speech weights
        weightOfNature.put("n", 2); // nouns get weight 2
        Map<String, String> stopNatures = new HashMap<>(); // stop parts of speech
        stopNatures.put("w", ""); // "w" is HanLP's tag for punctuation
        int overCount = 5; // threshold for over-frequent terms
        Map<String, Integer> wordCount = new HashMap<>();
        for (Term term : termList) {
            String word = term.word;                // the term's surface string
            String nature = term.nature.toString(); // the term's part-of-speech tag
            // Skip terms that occur too often.
            if (wordCount.containsKey(word)) {
                int count = wordCount.get(word);
                if (count > overCount) {
                    continue;
                }
                wordCount.put(word, count + 1);
            } else {
                wordCount.put(word, 1);
            }
            // Skip stop parts of speech.
            if (stopNatures.containsKey(nature)) {
                continue;
            }
            // Step 2: hash each term to a fixed-length bit sequence,
            // e.g. a 64-bit integer.
            BigInteger t = this.hash(word);
            int weight = 1; // default weight
            if (weightOfNature.containsKey(nature)) {
                weight = weightOfNature.get(nature);
            }
            // Step 3: accumulate a weighted feature vector of length hashbits:
            // for each term's hash, add the weight at positions where the bit
            // is 1 and subtract it where the bit is 0, until every term has
            // been processed.
            for (int i = 0; i < this.hashbits; i++) {
                BigInteger bitmask = BigInteger.ONE.shiftLeft(i);
                if (t.and(bitmask).signum() != 0) {
                    v[i] += weight;
                } else {
                    v[i] -= weight;
                }
            }
        }
        // Step 4: reduce the vector to the fingerprint: each non-negative
        // component becomes a 1 bit, each negative component a 0 bit.
        BigInteger fingerprint = BigInteger.ZERO;
        for (int i = 0; i < this.hashbits; i++) {
            if (v[i] >= 0) {
                fingerprint = fingerprint.add(BigInteger.ONE.shiftLeft(i));
            }
        }
        return fingerprint;
    }
    /**
     * Hash a single term.
     * @param source the term to hash
     * @return the term's hash as a BigInteger
     */
    private BigInteger hash(String source) {
        if (source == null || source.length() == 0) {
            return BigInteger.ZERO;
        } else {
            // Very short terms degrade the hash, so pad them by repeating
            // the first character until the length is at least 3.
            while (source.length() < 3) {
                source = source + source.charAt(0);
            }
            char[] sourceArray = source.toCharArray();
            BigInteger x = BigInteger.valueOf(((long) sourceArray[0]) << 7);
            BigInteger m = new BigInteger("1000003");
            BigInteger mask = BigInteger.ONE.shiftLeft(this.hashbits).subtract(BigInteger.ONE);
            for (char item : sourceArray) {
                BigInteger temp = BigInteger.valueOf((long) item);
                x = x.multiply(m).xor(temp).and(mask);
            }
            x = x.xor(BigInteger.valueOf(source.length()));
            if (x.equals(BigInteger.valueOf(-1))) {
                x = BigInteger.valueOf(-2);
            }
            return x;
        }
    }
    /**
     * Compute the Hamming distance; the smaller the distance, the more
     * similar the two strings.
     * @param other the SimHash to compare against
     * @return the number of differing fingerprint bits
     */
    int hammingDistance(MySimHash other) {
        BigInteger m = BigInteger.ONE.shiftLeft(this.hashbits).subtract(BigInteger.ONE);
        BigInteger x = this.strSimHash.xor(other.strSimHash).and(m);
        int tot = 0;
        // Kernighan's trick: x & (x - 1) clears the lowest set bit,
        // so the loop runs once per 1 bit.
        while (x.signum() != 0) {
            tot += 1;
            x = x.and(x.subtract(BigInteger.ONE));
        }
        return tot;
    }

    /**
     * Normalize the Hamming distance into a similarity in [0, 1]:
     * similarity = 1 - distance / hashbits.
     */
    public double getSemblance(MySimHash s2) {
        double i = (double) this.hammingDistance(s2);
        return 1 - i / this.hashbits;
    }
    public static void main(String[] args) {
        String s1 = " 云计算厂商服务器采购需求不及预期 ";
        String s2 = " 服务器需求不及预期 ";
        String s3 = " 中金:上调比亚迪H股目标价60%,高端车“汉”持续放量可期 ";
        String s4 = " 中金:上调比亚迪H股目标价60%至138港元,高端车“汉”持续放量可期 ";
        long l3 = System.currentTimeMillis();
        MySimHash hash1 = new MySimHash(s1, 64);
        MySimHash hash2 = new MySimHash(s2, 64);
        MySimHash hash3 = new MySimHash(s3, 64);
        MySimHash hash4 = new MySimHash(s4, 64);
        System.out.println("======================================");
        System.out.println(hash1.hammingDistance(hash2));
        // System.out.println(hash2.hammingDistance(hash3));
        System.out.println(hash3.hammingDistance(hash4));
        System.out.println(hash1.getSemblance(hash4));
        // System.out.println(hash2.getSemblance(hash3));
        System.out.println(hash2.getSemblance(hash4));
        long l4 = System.currentTimeMillis();
        System.out.println(l4 - l3); // elapsed milliseconds
        System.out.println("======================================");
    }
}
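As a usage note, the two measures are tied together by getSemblance = 1 - hammingDistance / hashbits, so a distance of 3 over 64 bits gives a similarity of about 0.953. A common rule of thumb for 64-bit SimHash is to treat fingerprints within Hamming distance 3 as near-duplicates; the helper below is a minimal sketch of that idea (the class name SimHashDedup and the threshold are illustrative, not part of the code above):

public class SimHashDedup {
    // Treat two texts as near-duplicates when their 64-bit fingerprints
    // differ in at most maxDistance bits. The threshold 3 is a common
    // rule of thumb for 64-bit SimHash, not something fixed by MySimHash.
    public static boolean isNearDuplicate(String a, String b, int maxDistance) {
        MySimHash ha = new MySimHash(a, 64);
        MySimHash hb = new MySimHash(b, 64);
        return ha.hammingDistance(hb) <= maxDistance;
    }

    public static void main(String[] args) {
        // Reusing two of the test sentences from MySimHash.main above.
        System.out.println(isNearDuplicate("云计算厂商服务器采购需求不及预期",
                "服务器需求不及预期", 3));
    }
}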
FilterInclusion:
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import org.apache.commons.lang3.StringUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* created on 2020/10/15
*/
public class FilterInclusion {

    public List<Term> getSegList(String sen) {
        return StandardTokenizer.segment(sen);
    }

    // Prefix-overlap similarity: the fraction of terms in the shorter
    // sentence's prefix that also occur in the same-length prefix of the
    // other sentence.
    public double getSimilarity(String s1, String s2) {
        List<Term> ls1 = getSegList(s1);
        List<Term> ls2 = getSegList(s2);
        int len = Math.min(ls1.size(), ls2.size());
        // Compare surface strings rather than Term objects, so matching
        // does not depend on Term defining value equality.
        List<String> prefix2 = ls2.subList(0, len).stream()
                .map(t -> t.word)
                .collect(Collectors.toList());
        double count = 0;
        for (int i = 0; i < len; i++) {
            if (prefix2.contains(ls1.get(i).word)) {
                count += 1;
            }
        }
        double similarity = count / len;
        // Alternative that also accounts for sentence length:
        // similarity = count / (Math.log(ls1.size() + ls2.size()));
        return similarity;
    }
    // Character-level fuzzy similarity, kept for reference:
    // public double getSimiliarity1(String s1, String s2) {
    //     double dis = StringUtils.getFuzzyDistance(s1, s2, Locale.CHINA);
    //     return dis;
    // }

    // Similarity via SimHash and Hamming distance.
    public double getSimilarity1(String s1, String s2) {
        MySimHash hash1 = new MySimHash(s1, 64);
        MySimHash hash2 = new MySimHash(s2, 64);
        return hash1.getSemblance(hash2);
    }
    /**
     * Return the indices of the sentences that should be deleted as duplicates.
     */
    public List<Integer> drop_duplicate(List<String> stringList) {
        System.out.println("stringList:" + stringList.size());
        List<Integer> indexList = new ArrayList<>();
        // Pairwise filtering of adjacent sentences only, kept for reference:
        // for (int i = stringList.size() - 1; i >= 1; i--) {
        //     String str1 = stringList.get(i);
        //     String str2 = stringList.get(i - 1);
        //     double similarity1 = getSimilarity(str1, str2);
        //     double similarity2 = getSimilarity1(str1, str2);
        //     if (similarity1 > 0.5 || similarity2 > 0.65) {
        //         if (str1.length() >= str2.length()) {
        //             stringList.remove(i - 1);
        //             indexList.add(i - 1);
        //         } else {
        //             stringList.remove(i);
        //             indexList.add(i);
        //         }
        //     }
        // }
        // Compare every pair of sentences; when a pair is similar enough,
        // mark the shorter one for deletion. Each unordered pair is visited
        // twice, so duplicate indices are removed with distinct() below.
        for (int i = stringList.size() - 1; i >= 1; i--) {
            for (int j = stringList.size() - 1; j >= 0; j--) {
                if (i == j) continue;
                String str1 = stringList.get(i);
                String str2 = stringList.get(j);
                double similarity1 = getSimilarity(str1, str2);  // prefix overlap
                double similarity2 = getSimilarity1(str1, str2); // SimHash similarity
                if (similarity1 > 0.5 || similarity2 > 0.65) {
                    if (str1.length() >= str2.length()) {
                        indexList.add(j);
                    } else {
                        indexList.add(i);
                    }
                }
            }
        }
        indexList = indexList.stream().distinct().collect(Collectors.toList());
        System.out.println("indexList:" + indexList.toString());
        return indexList;
    }
    public static void main(String[] args) {
        FilterInclusion test = new FilterInclusion();
        List<String> stringList = new ArrayList<>();
        stringList.add("农产品和原材料价格波动、磷酸一铵价格大幅下跌风险、成本测算的局限性、环保政策风险、竞争加剧&复合肥销量不及预期风险");
        stringList.add("成本测算局限性");
        stringList.add("新能源汽车需求不及预期");
        stringList.add("新能源汽车销量不及预期");
        stringList.add("疫情导致下半年招生不及预期,核心高管流失,市场竞争激烈");
        stringList.add("行业需求增长不及预期");
        stringList.add("因疫情造成经营业绩下降的风险");
        stringList.add("铅锌产品价格持续大幅下滑");
        stringList.add("公司资源勘探低于预期,铅锌金属价格下跌,全球流动性收紧黄金价格下跌");
        // stringList.add("广州港:前9月预计完成货物吞吐量37874万吨,同比增长1.4%");
        // stringList.add("广州港:前9月预计完成货物吞吐量同比增长1.4%");
        // stringList.add("下游需求不及预期");
        // stringList.add("下游需求不及预期,市场竞争加剧");
        // stringList.add("中金:上调比亚迪H股目标价60%,高端车“汉”持续放量可期");
        // stringList.add("中金:上调比亚迪H股目标价60%至138港元,高端车“汉”持续放量可期");
        // stringList.add("云计算厂商服务器采购需求不及预期");
        // stringList.add("服务器需求不及预期");
        System.out.println(stringList);
        System.out.println(test.drop_duplicate(stringList));
    }
}
Call the FilterInclusion class directly: pass the sentences in as an ArrayList, and it returns the indices of the sentences that should be deleted.
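For example, a minimal caller sketch (the class name DedupDemo is illustrative; the sample sentences are reused from main above). Note that drop_duplicate returns positions in the input list, so they should be removed in descending order to avoid index shifting:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class DedupDemo {
    public static void main(String[] args) {
        List<String> sentences = new ArrayList<>();
        sentences.add("新能源汽车需求不及预期");
        sentences.add("新能源汽车销量不及预期");
        sentences.add("行业需求增长不及预期");

        FilterInclusion filter = new FilterInclusion();
        List<Integer> toDelete = filter.drop_duplicate(sentences);

        // Remove in descending index order so earlier removals do not
        // shift the positions of later entries.
        toDelete.stream()
                .sorted(Comparator.reverseOrder())
                .forEach(i -> sentences.remove((int) i));

        System.out.println(sentences);
    }
}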