# -*- encoding=utf-8 -*-
import jieba.posseg
import jieba.analyse
import math
import re
# jieba实现中文分词
# jieba performs Chinese word segmentation
def jieba_function(input1):
    """Segment a Chinese sentence with jieba and return the tokens joined by commas.

    Args:
        input1: raw input sentence (str).
    Returns:
        str: comma-separated segmented tokens.
    """
    # Strip every non-word character (punctuation, whitespace) before segmenting.
    # \W+ instead of \W* avoids pointless zero-width matches; output is identical.
    input1 = re.sub(r'\W+', '', input1)
    # jieba.load_userdict("dic.txt")
    # NOTE(review): expects a stop-word file "3.txt" in the working directory — confirm.
    jieba.analyse.set_stop_words("3.txt")
    # Dictionary tweak: force "看"+"电视" to be segmented as two words.
    jieba.suggest_freq(('看', '电视'), tune=True)
    word_2 = jieba.cut(input1)
    # print(' '.join(word_2))
    return ','.join(word_2)
# Build a shared vocabulary set, per-sentence frequency dicts,
# and frequency vectors, then print their cosine similarity.
def bow(sent1, sent2):
    """Segment two sentences, build bag-of-words vectors over the shared
    vocabulary, and print the cosine similarity between them.

    Args:
        sent1: first raw sentence (str).
        sent2: second raw sentence (str).
    """
    sent1 = jieba_function(sent1)
    sent2 = jieba_function(sent2)
    print(sent1 + '\n' + sent2)
    # BUG FIX: the original counted with str.count on the comma-joined string,
    # which counts substring hits — a token occurring inside a longer token was
    # over-counted. Count whole tokens instead.
    tokens1 = sent1.split(',')
    tokens2 = sent2.split(',')
    vocab = set(tokens1) | set(tokens2)
    dict1 = {}
    dict2 = {}
    for word in vocab:
        dict1[word] = tokens1.count(word)
        dict2[word] = tokens2.count(word)
    # Both dicts were filled in the same vocab iteration order, so the
    # vectors are aligned component-by-component.
    list_1 = list(dict1.values())
    list_2 = list(dict2.values())
    print(dict1, list_1)
    print(dict2, list_2)
    print(count_cos_similarity(list_1, list_2))
# Cosine similarity of two frequency vectors
def count_cos_similarity(vec_1, vec_2):
    """Return the cosine similarity of two equal-length numeric vectors.

    Args:
        vec_1: first vector (sequence of numbers).
        vec_2: second vector (sequence of numbers).
    Returns:
        float: cosine similarity in [-1, 1]; 0 when the lengths differ
        or either vector is all zeros (the original raised
        ZeroDivisionError in that case).
    """
    if len(vec_1) != len(vec_2):
        return 0
    dot = sum(a * b for a, b in zip(vec_1, vec_2))
    den1 = math.sqrt(sum(a * a for a in vec_1))
    den2 = math.sqrt(sum(b * b for b in vec_2))
    # Guard against an all-zero vector (e.g. empty input sentence).
    if den1 == 0 or den2 == 0:
        return 0
    return dot / (den1 * den2)
if __name__ == "__main__":
    # Read two sentences from stdin and print their bag-of-words
    # cosine similarity. Guarded so importing this module does not
    # block waiting on stdin.
    bow(input(), input())
# 敬请指正! (Feedback and corrections welcome!)