SimHash算法
概述
-
SimHash算法来自于 Google 的 Moses Charikar 发表的一篇论文“Detecting Near-Duplicates for Web Crawling”,其主要思想是降维, 将高维的特征向量映射成低维的特征向量,通过两个向量的Hamming Distance(汉明距离)来确定文章是否重复或者高度近似。
-
Hamming Distance: 又称汉明距离,在信息论中,两个等长字符串之间的汉明距离是两个字符串对应位置的不同字符的个数。也就是说,它就是将一个字符串变换成另外一个字符串所需要替换的字符个数。
一、实现思路
- 分词:对需要比较的文本进行分词,提取特征向量。并对特征向量,进行权重(weight)设置。
- hash: 通过hash函数计算各个特征向量的hash值。hash值为二进制数0 1 组成的n-bit签名。
- 加权:在hash值的基础上,给所有特征向量进行加权,即
W = Hash * weight
,且遇到1则hash值和权值正相乘,遇到0则hash值和权值负相乘。
- 合并:将上述各个特征向量的加权结果累加,变成只有一个序列串。拿前两个特征向量举例。
- 降维:对于n-bit签名的累加结果,如果大于0则置1,否则置0,从而得到该语句的simhash值,最后我们便可以根据不同语句simhash的汉明距离来判断它们的相似度。
- 计算:通过Simhash签名值,计算汉明距离。
二、实现流程图
三、代码实现
python版——jieba分词
import jieba
import jieba.analyse
import numpy as np
class Simhash(object):
    """Compute 64-bit SimHash fingerprints for Chinese text, using jieba
    TF-IDF keyword extraction as the weighted feature source."""

    def simhash(self, content):
        """Return the SimHash signature of ``content``.

        Returns a 64-character '0'/'1' string, or "00" when no keywords
        could be extracted (kept for backward compatibility).
        """
        # Tokenize, then let extract_tags pick the top-20
        # (keyword, tf-idf weight) pairs from the re-joined text.
        seg = jieba.cut(content)
        # To also drop stop words, enable:
        # jieba.analyse.set_stop_words("stopwords.txt")
        keywords = jieba.analyse.extract_tags(
            "|".join(seg), topK=20, withWeight=True, allowPOS=())
        keylist = []
        for feature, weight in keywords:
            # Scale the fractional tf-idf weight up to a small integer.
            weight = int(weight * 20)
            feature = self.string_hash(feature)
            # Signed weighting: a '1' bit contributes +weight, a '0' bit -weight.
            keylist.append(
                [weight if bit == "1" else -weight for bit in feature])
        # Bug fix: guard BEFORE summing — the original called np.sum first,
        # which cannot yield a per-bit vector for an empty feature list.
        if not keylist:
            return "00"
        totals = np.sum(np.array(keylist), axis=0)
        # Dimensionality reduction: positive column sum -> '1', else '0'.
        return "".join("1" if t > 0 else "0" for t in totals)

    def string_hash(self, source):
        """Hash ``source`` to a 64-character binary string.

        Uses the multiply-xor scheme of CPython's historic string hash
        (multiplier 1000003), masked to 128 bits during accumulation and
        truncated to the low 64 bits at the end.
        """
        if source == "":
            # Bug fix: the original returned the int 0 here, while every
            # other path returns a string — callers iterating over the
            # result's bits would crash on the int.
            return "0" * 64
        x = ord(source[0]) << 7
        m = 1000003          # multiplier from CPython's old string hash
        mask = 2 ** 128 - 1  # keep intermediate values bounded
        for c in source:
            x = ((x * m) ^ ord(c)) & mask
        x ^= len(source)
        if x == -1:  # unreachable after masking; kept for parity with CPython
            x = -2
        # Low 64 bits, zero-padded, as a '0'/'1' string.
        return bin(x).replace('0b', '').zfill(64)[-64:]
def hammingDis(s1, s2):
    """Return the normalized Hamming distance between two binary-string
    SimHash signatures (0.0 = identical; larger = more different).

    Bug fix: the original computed the ratio but only printed it and
    implicitly returned None — it is now returned.

    NOTE(review): the normalizer max(len(bin(...))) counts the '0b'
    prefix and drops leading zero bits; kept as-is for value
    compatibility, though dividing by a fixed 64 would be cleaner.
    """
    t1 = "0b" + s1
    t2 = "0b" + s2
    # XOR leaves a 1 bit at every position where the signatures differ.
    n = int(t1, 2) ^ int(t2, 2)
    # Kernighan's trick: each step clears the lowest set bit.
    diff_bits = 0
    while n:
        n &= (n - 1)
        diff_bits += 1
    max_hashbit = max(len(bin(int(t1, 2))), len(bin(int(t2, 2))))
    return diff_bits / max_hashbit
if __name__ == "__main__":
    # Demo: fingerprint two similar sentences and compare their signatures.
    first_text = "天气非常好"
    second_text = "天气真不错"
    hasher = Simhash()
    first_sig = hasher.simhash(first_text)
    second_sig = hasher.simhash(second_text)
    print(first_sig)
    print(second_sig)
    hammingDis(first_sig, second_sig)
Java 版——ansj分词
import org.ansj.domain.Term;
import org.ansj.recognition.impl.StopRecognition;
import org.ansj.splitWord.analysis.ToAnalysis;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * SimHash fingerprinting over ansj-segmented Chinese text.
 *
 * <p>Builds a {@code hashbits}-bit signature for a string and compares two
 * signatures via Hamming distance; a smaller distance means more similar.
 * Unlike the jieba/Python variant, every token carries the same fixed
 * weight (20) — set per-token weights as needed.
 */
public class MySimHash {
    /** Raw input text to fingerprint. */
    private String tokens;
    /** SimHash signature computed from {@link #tokens}. */
    private BigInteger strSimHash;
    /** Signature width in bits (default 64). */
    private int hashbits = 64;

    public MySimHash(String tokens) {
        this.tokens = tokens;
        this.strSimHash = this.simHash();
    }

    private MySimHash(String tokens, int hashbits) {
        this.tokens = tokens;
        this.hashbits = hashbits;
        this.strSimHash = this.simHash();
    }

    /**
     * Computes the SimHash signature of the whole input string.
     *
     * @return the {@code hashbits}-bit fingerprint
     */
    private BigInteger simHash() {
        // Stop-word filter for the ansj tokenizer (local; the former unused
        // field of the same name has been removed).
        StopRecognition filter = new StopRecognition();
        List<String> stopWordList = new ArrayList<>();
        stopWordList.add(",");
        filter.insertStopWords(stopWordList);
        // Per-bit signed accumulator for the weighted token hashes.
        int[] v = new int[this.hashbits];
        // Segment the input text.
        List<Term> terms = ToAnalysis.parse(this.tokens).recognition(filter).getTerms();
        // Occurrences beyond this count are skipped to damp very frequent tokens.
        int overCount = 5;
        Map<String, Integer> wordCount = new HashMap<String, Integer>();
        for (Term term : terms) {
            String word = term.getName();
            if (wordCount.containsKey(word)) {
                int count = wordCount.get(word);
                if (count > overCount) {
                    continue;
                }
                wordCount.put(word, count + 1);
            } else {
                wordCount.put(word, 1);
            }
            // Hash the token, then fold each of its bits into the accumulator.
            BigInteger t = this.hash(word);
            for (int i = 0; i < this.hashbits; i++) {
                BigInteger bitmask = BigInteger.ONE.shiftLeft(i);
                // Uniform weight for every token (tune per token if desired).
                int weight = 20;
                if (t.and(bitmask).signum() != 0) {
                    // Bit set: this bit votes +weight toward the document sum.
                    v[i] += weight;
                } else {
                    v[i] -= weight;
                }
            }
        }
        // Dimensionality reduction: a non-negative column sum becomes a 1 bit.
        BigInteger fingerprint = BigInteger.ZERO;
        for (int i = 0; i < this.hashbits; i++) {
            if (v[i] >= 0) {
                fingerprint = fingerprint.add(BigInteger.ONE.shiftLeft(i));
            }
        }
        return fingerprint;
    }

    /**
     * Hashes a single token (multiply-xor scheme, cf. CPython's old string
     * hash with multiplier 1000003), masked to {@code hashbits} bits.
     *
     * @param source token to hash; null/empty hashes to zero
     * @return the non-negative masked hash value
     */
    private BigInteger hash(String source) {
        if (source == null || source.length() == 0) {
            return BigInteger.ZERO;
        }
        // Very short tokens weaken the hash, so pad by repeating the first char.
        while (source.length() < 3) {
            source = source + source.charAt(0);
        }
        char[] sourceArray = source.toCharArray();
        BigInteger x = BigInteger.valueOf(((long) sourceArray[0]) << 7);
        BigInteger m = new BigInteger("1000003");
        BigInteger mask = BigInteger.ONE.shiftLeft(this.hashbits).subtract(BigInteger.ONE);
        for (char item : sourceArray) {
            BigInteger temp = BigInteger.valueOf((long) item);
            x = x.multiply(m).xor(temp).and(mask);
        }
        x = x.xor(BigInteger.valueOf(source.length()));
        // Unreachable after masking (x is never negative); kept only for
        // parity with the CPython algorithm this mirrors.
        if (x.equals(BigInteger.valueOf(-1))) {
            x = BigInteger.valueOf(-2);
        }
        return x;
    }

    /**
     * Hamming distance between the two signatures; smaller means more similar.
     *
     * @param other the signature to compare against
     * @return number of differing bits
     */
    private int hammingDistance(MySimHash other) {
        BigInteger m = BigInteger.ONE.shiftLeft(this.hashbits).subtract(BigInteger.ONE);
        BigInteger x = this.strSimHash.xor(other.strSimHash).and(m);
        int tot = 0;
        // Kernighan's trick: each step clears the lowest set bit.
        while (x.signum() != 0) {
            tot += 1;
            x = x.and(x.subtract(BigInteger.ONE));
        }
        return tot;
    }

    /**
     * Normalized dissimilarity in [0, 1]: Hamming distance / hashbits.
     */
    public double getSemblance(MySimHash s2) {
        double i = (double) this.hammingDistance(s2);
        return i / this.hashbits;
    }

    public static void main(String[] args) {
        String text1 = "天气非常好";
        String text2 = "天气真不错";
        MySimHash hash1 = new MySimHash(text1, 64);
        MySimHash hash2 = new MySimHash(text2, 64);
        System.out.println(hash1.hammingDistance(hash2));
        System.out.println(hash1.getSemblance(hash2));
    }
}
注意:
python版是使用jieba分词,分词后带有权重值。java版使用ansj分词,需要自行手动设置权重。