Comparison of Text Similarity Algorithms and Their Python Implementations

Preface
We often have this kind of requirement: checking how much two articles or product descriptions overlap with each other.

To solve this kind of problem, this post lists several common similarity algorithms and implements them in Python.

Five common similarity algorithms are covered: cosine similarity (cosine_similarity), Jaccard similarity, edit distance (Levenshtein), MinHash, and SimHash + Hamming distance.

The code was left behind by a senior colleague; I have tidied it up to share here. I will not rehash the theory behind each algorithm, which is easy to look up. Questions and comments are welcome, thanks!

Cosine Similarity (cosine_similarity)

# -*- coding: utf-8 -*-

# regular expressions
import re
# HTML escape/unescape helpers
import html
# NLP (Chinese word segmentation and keyword extraction)
import jieba
import jieba.analyse
# machine learning (cosine similarity)
from sklearn.metrics.pairwise import cosine_similarity


class CosineSimilarity(object):
    """
    Cosine similarity
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def extract_keyword(content):  # extract keywords
        # strip HTML tags with a regex
        re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S)
        content = re_exp.sub(' ', content)
        # unescape HTML entities
        content = html.unescape(content)
        # segment the text
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # extract the top keywords
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=200, withWeight=False)
        return keywords

    @staticmethod
    def one_hot(word_dict, keywords):  # one-hot style encoding (term counts)
        # cut_code = [word_dict[word] for word in keywords]
        cut_code = [0]*len(word_dict)
        for word in keywords:
            cut_code[word_dict[word]] += 1
        return cut_code

    def main(self):
        # load the stop-word list
        jieba.analyse.set_stop_words('./files/stopwords.txt')

        # extract keywords
        keywords1 = self.extract_keyword(self.s1)
        keywords2 = self.extract_keyword(self.s2)
        # union of the two keyword lists
        union = set(keywords1).union(set(keywords2))
        # build a word-to-index mapping
        word_dict = {}
        i = 0
        for word in union:
            word_dict[word] = i
            i += 1
        # encode both keyword lists
        s1_cut_code = self.one_hot(word_dict, keywords1)
        s2_cut_code = self.one_hot(word_dict, keywords2)
        # cosine similarity
        sample = [s1_cut_code, s2_cut_code]
        # guard against empty input
        try:
            sim = cosine_similarity(sample)
            return sim[1][0]
        except Exception as e:
            print(e)
            return 0.0


# test
if __name__ == '__main__':
    with open('./files/sample_x.txt', 'r') as x, open('./files/sample_y.txt', 'r') as y:
        content_x = x.read()
        content_y = y.read()
        similarity = CosineSimilarity(content_x, content_y)
        similarity = similarity.main()
        print('Similarity: %.2f%%' % (similarity*100))

Output:

Building prefix dict from the default dictionary ...
Loading model from cache /tmp/jieba.cache
Loading model cost 0.915 seconds.
Prefix dict has been built succesfully.
Similarity: 79.67%
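
As a note on the return value: cosine_similarity is called on both encoded vectors at once, so it returns a 2x2 similarity matrix and sim[1][0] picks out the cross-pair entry. A minimal sketch with two hypothetical one-hot vectors (not taken from the sample files above):

from sklearn.metrics.pairwise import cosine_similarity

# two hypothetical count vectors over a shared keyword vocabulary
s1_cut_code = [1, 1, 0, 1]
s2_cut_code = [1, 0, 1, 1]

sim = cosine_similarity([s1_cut_code, s2_cut_code])
print(sim)        # 2x2 matrix: ones on the diagonal, ~0.667 off the diagonal
print(sim[1][0])  # ~0.667, the value CosineSimilarity.main() would return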

Jaccard Similarity

# -*- coding: utf-8 -*-

# regular expressions
import re
# NLP (Chinese word segmentation and keyword extraction)
import jieba
import jieba.analyse
# HTML escape/unescape helpers
import html


class JaccardSimilarity(object):
    """
    Jaccard similarity
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def extract_keyword(content):  # extract keywords
        # strip HTML tags with a regex
        re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S)
        content = re_exp.sub(' ', content)
        # unescape HTML entities
        content = html.unescape(content)
        # segment the text
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # extract the top keywords
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=200, withWeight=False)
        return keywords

    def main(self):
        # load the stop-word list
        jieba.analyse.set_stop_words('./files/stopwords.txt')

        # segmentation and keyword extraction
        keywords_x = self.extract_keyword(self.s1)
        keywords_y = self.extract_keyword(self.s2)

        # Jaccard similarity: |intersection| / |union|
        intersection = len(list(set(keywords_x).intersection(set(keywords_y))))
        union = len(list(set(keywords_x).union(set(keywords_y))))
        # guard against division by zero
        sim = float(intersection)/union if union != 0 else 0
        return sim


# test
if __name__ == '__main__':
    with open('./files/sample_x.txt', 'r') as x, open('./files/sample_y.txt', 'r') as y:
        content_x = x.read()
        content_y = y.read()
        similarity = JaccardSimilarity(content_x, content_y)
        similarity = similarity.main()
        print('Similarity: %.2f%%' % (similarity*100))

Output:

Building prefix dict from the default dictionary ...
Loading model from cache /tmp/jieba.cache
Loading model cost 0.893 seconds.
Prefix dict has been built succesfully.
Similarity: 66.20%
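
The Jaccard computation itself reduces to two set operations; a minimal sketch on two hypothetical keyword sets:

# hypothetical keyword sets extracted from two product descriptions
keywords_x = {'price', 'cotton', 'shirt', 'blue'}
keywords_y = {'price', 'cotton', 'shirt', 'red'}

intersection = keywords_x & keywords_y   # {'price', 'cotton', 'shirt'}
union = keywords_x | keywords_y          # 5 distinct keywords
print(len(intersection) / len(union))    # 0.6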

Edit Distance (Levenshtein)

# -*- coding: utf-8 -*-

# regular expressions
import re
# HTML escape/unescape helpers
import html
# NLP (Chinese word segmentation and keyword extraction)
import jieba
import jieba.analyse
# edit distance (python-Levenshtein)
import Levenshtein


class LevenshteinSimilarity(object):
    """
    Edit distance (Levenshtein)
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def extract_keyword(content):  # extract keywords
        # strip HTML tags with a regex
        re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S)
        content = re_exp.sub(' ', content)
        # unescape HTML entities
        content = html.unescape(content)
        # segment the text
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # extract the top keywords
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=200, withWeight=False)
        return keywords

    def main(self):
        # load the stop-word list
        jieba.analyse.set_stop_words('./files/stopwords.txt')

        # extract keywords
        keywords1 = ', '.join(self.extract_keyword(self.s1))
        keywords2 = ', '.join(self.extract_keyword(self.s2))

        # ratio() returns the similarity of the two strings, based on the minimum edit distance
        distances = Levenshtein.ratio(keywords1, keywords2)
        return distances


# test
if __name__ == '__main__':
    with open('./files/sample_x.txt', 'r') as x, open('./files/sample_y.txt', 'r') as y:
        content_x = x.read()
        content_y = y.read()
        distance = LevenshteinSimilarity(content_x, content_y)
        distance = distance.main()
        print('Similarity: %.2f%%' % (distance * 100))

Output:

Building prefix dict from the default dictionary ...
Loading model from cache /tmp/jieba.cache
Loading model cost 0.786 seconds.
Prefix dict has been built succesfully.
Similarity: 82.24%
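
For intuition on the two python-Levenshtein calls involved, here is a minimal sketch on a classic string pair: distance() is the raw number of edits, while ratio() is the normalized similarity in [0, 1] that the class above returns (ratio() weights substitutions differently, so it is not simply 1 - distance/length).

import Levenshtein

print(Levenshtein.distance('kitten', 'sitting'))  # 3 edits: two substitutions and one insertion
print(Levenshtein.ratio('kitten', 'sitting'))     # roughly 0.62, a normalized similarity score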

MinHash

# -*- coding: utf-8 -*-

# regular expressions
import re
# NLP (Chinese word segmentation and keyword extraction)
import jieba
import jieba.analyse
# HTML escape/unescape helpers
import html
# MinHash from the datasketch package
from datasketch import MinHash


class MinHashSimilarity(object):
    """
    MinHash
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def extract_keyword(content):  # extract keywords
        # strip HTML tags with a regex
        re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S)
        content = re_exp.sub(' ', content)
        # unescape HTML entities
        content = html.unescape(content)
        # segment the text
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # extract the top keywords
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=200, withWeight=False)
        return keywords

    def main(self):
        # load the stop-word list
        jieba.analyse.set_stop_words('./files/stopwords.txt')

        # MinHash signatures
        m1, m2 = MinHash(), MinHash()
        # extract keywords
        s1 = self.extract_keyword(self.s1)
        s2 = self.extract_keyword(self.s2)

        for data in s1:
            m1.update(data.encode('utf8'))
        for data in s2:
            m2.update(data.encode('utf8'))

        return m1.jaccard(m2)


# test
if __name__ == '__main__':
    with open('./files/sample_x.txt', 'r') as x, open('./files/sample_y.txt', 'r') as y:
        content_x = x.read()
        content_y = y.read()
        similarity = MinHashSimilarity(content_x, content_y)
        similarity = similarity.main()
        print('Similarity: %.2f%%' % (similarity*100))

Output:

Building prefix dict from the default dictionary ...
Loading model from cache /tmp/jieba.cache
Loading model cost 0.901 seconds.
Prefix dict has been built succesfully.
Similarity: 64.84%
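
Because MinHash only estimates Jaccard similarity, the 64.84% here drifts slightly from the exact 66.20% computed in the Jaccard section; the accuracy of the estimate is controlled by num_perm (datasketch defaults to 128 permutations). A minimal sketch on two hypothetical keyword lists:

from datasketch import MinHash

s1 = ['price', 'cotton', 'shirt', 'blue', 'summer']
s2 = ['price', 'cotton', 'shirt', 'red', 'summer']

# more permutations -> a better estimate, at the cost of time and memory
m1, m2 = MinHash(num_perm=256), MinHash(num_perm=256)
for w in s1:
    m1.update(w.encode('utf8'))
for w in s2:
    m2.update(w.encode('utf8'))

exact = len(set(s1) & set(s2)) / len(set(s1) | set(s2))   # 4 / 6, about 0.67
print('estimate: %.2f, exact: %.2f' % (m1.jaccard(m2), exact))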

SimHash + Hamming Distance

# -*- coding: utf-8 -*-

# regular expressions
import re
# HTML escape/unescape helpers
import html
# math utilities
import math
# NLP (Chinese word segmentation and keyword extraction)
import jieba
import jieba.analyse


class SimHashSimilarity(object):
    """
    SimHash
    """
    def __init__(self, content_x1, content_y2):
        self.s1 = content_x1
        self.s2 = content_y2

    @staticmethod
    def get_bin_str(source):  # hash a string to a 64-bit binary string
        if source == "":
            return "0" * 64  # empty strings hash to an all-zero fingerprint
        else:
            t = ord(source[0]) << 7
            m = 1000003
            mask = 2 ** 128 - 1
            for c in source:
                t = ((t * m) ^ ord(c)) & mask
            t ^= len(source)
            if t == -1:
                t = -2
            t = bin(t).replace('0b', '').zfill(64)[-64:]
            return str(t)

    @staticmethod
    def extract_keyword(content):  # extract keywords
        # strip HTML tags with a regex
        re_exp = re.compile(r'(<style>.*?</style>)|(<[^>]+>)', re.S)
        content = re_exp.sub(' ', content)
        # unescape HTML entities
        content = html.unescape(content)
        # segment the text
        seg = [i for i in jieba.cut(content, cut_all=True) if i != '']
        # extract the top keywords together with their weights
        keywords = jieba.analyse.extract_tags("|".join(seg), topK=200, withWeight=True)
        return keywords

    def run(self, keywords):
        ret = []
        for keyword, weight in keywords:
            bin_str = self.get_bin_str(keyword)
            key_list = []
            for c in bin_str:
                weight = math.ceil(weight)
                if c == "1":
                    key_list.append(int(weight))
                else:
                    key_list.append(-int(weight))
            ret.append(key_list)
        # collapse the weighted bit vectors column by column into one fingerprint
        rows = len(ret)
        cols = len(ret[0])
        result = []
        for i in range(cols):
            tmp = 0
            for j in range(rows):
                tmp += int(ret[j][i])
            if tmp > 0:
                tmp = "1"
            elif tmp <= 0:
                tmp = "0"
            result.append(tmp)
        return "".join(result)

    def main(self):
        # load the stop-word list
        jieba.analyse.set_stop_words('./files/stopwords.txt')

        # extract keywords
        s1 = self.extract_keyword(self.s1)
        s2 = self.extract_keyword(self.s2)

        sim_hash1 = self.run(s1)
        sim_hash2 = self.run(s2)
        # print(f'SimHash fingerprint 1: {sim_hash1}\nSimHash fingerprint 2: {sim_hash2}')
        length = 0
        for index, char in enumerate(sim_hash1):
            if char == sim_hash2[index]:
                continue
            else:
                length += 1
        return length


# test
if __name__ == '__main__':
    with open('./files/sample_x.txt', 'r') as x, open('./files/sample_y.txt', 'r') as y:
        content_x = x.read()
        content_y = y.read()

        similarity = SimHashSimilarity(content_x, content_y)
        similarity = similarity.main()
        # threshold for deciding whether the two texts are similar
        threshold = 3
        print(f'Hamming distance: {similarity}, threshold: {threshold}, similar: {similarity <= threshold}')

Output:

Building prefix dict from the default dictionary ...
Loading model from cache /tmp/jieba.cache
Loading model cost 0.903 seconds.
Prefix dict has been built succesfully.
Hamming distance: 17, threshold: 3, similar: False
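
The bit-by-bit loop in main() is one way to compute the Hamming distance; an equivalent sketch using integer XOR on the two fingerprint strings produced by run():

def hamming_distance(fp1, fp2):
    # bits that differ between the two fingerprints become 1 after XOR
    x = int(fp1, 2) ^ int(fp2, 2)
    return bin(x).count('1')

print(hamming_distance('10110', '10011'))  # 2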

Discussion
A comparison of the algorithms:

Cosine similarity

Relatively high computational cost.

In related work, item-based collaborative filtering systems commonly use cosine similarity as the similarity measure. In many real-world applications, however, the data is so sparse that cosine similarity can produce misleading results.

Jaccard similarity

In product descriptions, many operations staff save effort by copying, pasting, and lightly editing existing text, which makes the descriptions highly duplicated. Extracting keywords from each description and then comparing the intersection and union of the two keyword sets, i.e. Jaccard similarity, is a very good fit for detecting duplicated product descriptions in this scenario.

Edit distance

Relatively high computational cost, and the results in this scenario are not ideal.

MinHash

A way to compute Jaccard similarity on large datasets: by reducing the dimensionality of the text data it greatly speeds up the computation (see the MinHashLSH sketch below for scaling this to many documents).
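
For a large product catalogue, datasketch also provides MinHashLSH, which indexes the MinHash signatures so candidate near-duplicates can be retrieved without comparing every pair. A minimal sketch; the documents, keys, and threshold here are hypothetical, not from the original code:

from datasketch import MinHash, MinHashLSH

def minhash_of(words, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for w in words:
        m.update(w.encode('utf8'))
    return m

# hypothetical keyword lists for three product descriptions
docs = {
    'doc1': ['price', 'cotton', 'shirt', 'blue'],
    'doc2': ['price', 'cotton', 'shirt', 'red'],
    'doc3': ['laptop', 'battery', 'screen'],
}

# index every document once; threshold is the approximate Jaccard cut-off
lsh = MinHashLSH(threshold=0.5, num_perm=128)
for key, words in docs.items():
    lsh.insert(key, minhash_of(words))

# query returns the keys of candidate near-duplicates
print(lsh.query(minhash_of(['price', 'cotton', 'shirt'])))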

SimHash + Hamming distance

The error is fairly large for documents with fewer than 500 words.

On the difference between MinHash and SimHash: https://blog.csdn.net/dm_ustc/article/details/45626907

Choose the algorithm that fits your scenario, or combine several of them. The full code is on GitHub: https://github.com/downdawn/Similarity

These are working notes; questions and comments are welcome, thanks!
