NLP: Comparing the Similarity of Two Sentences with Cosine Similarity

1. The two sentences to compare:

s1 = "周杰伦是一个歌手,也是一个叉叉"
s2 = "周杰伦不是一个叉叉,但是是一个歌手"

2. Word segmentation:

import jieba

# Full-mode segmentation; drop the empty strings that full mode emits at punctuation
s1_list = [x for x in jieba.cut(s1, cut_all=True) if x != '']
s2_list = [x for x in jieba.cut(s2, cut_all=True) if x != '']

print(s1_list)
print(s2_list)
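
For comparison, here is a small sketch (not in the original post) of precise-mode segmentation, i.e. cut_all=False, which covers each character exactly once, whereas full mode enumerates every word the dictionary can find in the sentence:

import jieba

s1 = "周杰伦是一个歌手,也是一个叉叉"

# Precise mode (jieba's default): a single, non-redundant segmentation
print(jieba.lcut(s1, cut_all=False))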

3. Term-frequency vectorization:

import jieba

s1 = "周杰伦是一个歌手,也是一个叉叉"
s2 = "周杰伦不是一个叉叉,但是是一个歌手"

s1_list = [x for x in jieba.cut(s1, cut_all=True) if x != '']
s2_list = [x for x in jieba.cut(s2, cut_all=True) if x != '']

# Build a shared vocabulary: assign an index to every word that appears in either sentence
s1_set = set(s1_list)
s2_set = set(s2_list)
word_dict = dict()
i = 0
for word in s1_set.union(s2_set):
    word_dict[word] = i
    i += 1

# Term-frequency vectorization: given the vocabulary and a list of words,
# return a vector whose entry at index word_dict[word] is that word's count
def word_to_vec(word_dict, lists):
    word_count = dict()
    result = [0] * len(word_dict)
    for word in lists:
        if word_count.get(word, -1) == -1:
            word_count[word] = 1
        else:
            word_count[word] += 1
    for word, freq in word_count.items():
        wid = word_dict[word]
        result[wid] = freq
    return result

s1_vec = word_to_vec(word_dict, s1_list)
s2_vec = word_to_vec(word_dict, s2_list)
print(s1_vec)
print(s2_vec)
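
As a side note (an addition, not part of the original post), the manual tally inside word_to_vec can be written more compactly with collections.Counter from the Python standard library; this sketch assumes the word_dict built above and should produce the same vectors:

from collections import Counter

def word_to_vec_counter(word_dict, words):
    # Counter replaces the manual per-word counting loop in word_to_vec
    counts = Counter(words)
    result = [0] * len(word_dict)
    for word, freq in counts.items():
        result[word_dict[word]] = freq
    return result

print(word_to_vec_counter(word_dict, s1_list))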

4. Computing the cosine similarity of the two vectors:

The cosine similarity of two term-frequency vectors is their dot product divided by the product of their Euclidean lengths; it is 1 when the vectors point in the same direction and 0 when the sentences share no words.

import jieba
import math

s1 = "周杰伦是一个歌手,也是一个叉叉"
s2 = "周杰伦不是一个叉叉,但是是一个歌手"

s1_list = [x for x in jieba.cut(s1, cut_all=True) if x != '']
s2_list = [x for x in jieba.cut(s2, cut_all=True) if x != '']

# Build the shared vocabulary
s1_set = set(s1_list)
s2_set = set(s2_list)
word_dict = dict()
i = 0
for word in s1_set.union(s2_set):
    word_dict[word] = i
    i += 1

# Term-frequency vectorization (same as in step 3)
def word_to_vec(word_dict, lists):
    word_count = dict()
    result = [0] * len(word_dict)
    for word in lists:
        if word_count.get(word, -1) == -1:
            word_count[word] = 1
        else:
            word_count[word] += 1
    for word, freq in word_count.items():
        wid = word_dict[word]
        result[wid] = freq
    return result

# Cosine similarity of two vectors: dot product divided by the product of their norms
def cos_dist(a, b):
    if len(a) != len(b):
        return None
    part_up = 0.0
    a_sq = 0.0
    b_sq = 0.0
    for a1, b1 in zip(a, b):
        part_up += a1 * b1
        a_sq += a1 ** 2
        b_sq += b1 ** 2
    part_down = math.sqrt(a_sq * b_sq)
    if part_down == 0.0:
        return None
    else:
        return part_up / part_down

s1_vec = word_to_vec(word_dict, s1_list)
s2_vec = word_to_vec(word_dict, s2_list)
num = cos_dist(s1_vec, s2_vec)
print(num)
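
As a sanity check (an addition, assuming numpy is available), the same value can be computed with numpy's dot product and norms; it should match the hand-written cos_dist to floating-point precision:

import numpy as np

def cos_dist_np(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    # Mirror cos_dist above: undefined when either vector is all zeros
    if denom == 0.0:
        return None
    return float(np.dot(a, b) / denom)

print(cos_dist_np(s1_vec, s2_vec))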

 

Reposted from: https://www.cnblogs.com/students/p/10334087.html

