# Import two articles, use jieba word segmentation to pick the Top-10 weighted
# words, convert them to a hash encoding, and compare the Hamming distance of
# the two articles. Threshold is 18: Hamming distance <= 18 means the texts are
# similar, otherwise they are not.
# (Original author's question: how to write an interface for importing the two
# articles?)
import re
import codecs
import jieba
import jieba.analyse
import numpy as np
# Hard-coded absolute paths to the two articles being compared.
# NOTE(review): these are machine-specific Windows paths — consider taking them
# as function parameters or command-line arguments instead.
fr1 = 'C:/Users/Administrator/Desktop/2.txt'
fr2 = 'C:/Users/Administrator/Desktop/4.txt'
class simhash:
def __init__(self, content):
    """Compute the simhash fingerprint of *content* and store it on the instance.

    Note: the computed value is assigned to ``self.simhash``, which shadows
    the ``simhash`` method on this instance from then on — callers read the
    fingerprint via the ``simhash`` attribute.
    """
    fingerprint = self.simhash(content)
    self.simhash = fingerprint
def __str__(self):
return str(self.simhash)
def simhash(self,content):
# Compute the simhash fingerprint for *content*: extract the top-10 TF-IDF
# weighted keywords with jieba, then (presumably — the method is continued
# beyond this excerpt) fold each keyword's hash into the fingerprint.
#seg = jieba.cut(content)
#jieba.analyse.set_stop_words('stopword.txt')
keyWord = jieba.analyse.extract_tags(
'|'.join(content), topK=10, withWeight=True, allowPOS=())# jieba's tfidf.py was modified here
# Changed: tags = sorted(freq.items(), key=itemgetter(1), reverse=True)
# into:    tags = sorted(freq.items(), key=itemgetter(1,0), reverse=True)
# i.e. sort by weight first, then by the word itself, so ties between equally
# weighted keywords are broken deterministically.
keyList = []
for feature, weight