Keyword-sentence maximum similarity as feature dimensions

First, build one feature vector per sentence whose dimension equals the number of keywords (35 here): each component is the maximum word2vec similarity between that keyword and any word in the sentence.
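
The idea in a minimal, self-contained sketch (the function name max_similarity_vector and the argument kv are illustrative only; kv stands for an already-loaded gensim KeyedVectors object, loaded as in the full script below):

# Illustrative sketch of the feature construction; mirrors the loop in the full script.
def max_similarity_vector(keywords, sentence_words, kv):
    features = []
    for key in keywords:
        best = -1
        for w in sentence_words:
            try:
                s = kv.similarity(key, w)   # cosine similarity between the two word vectors
            except KeyError:                # sentence word not in the embedding vocabulary
                continue
            if s > best:
                best = s
        features.append(best if best != -1 else 0)   # 0 when no word in the sentence had a vector
    return features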


# -*- coding:UTF-8 -*-
from __future__ import division   # true division for the accuracy ratios (must come before other imports)
from sklearn import svm           # SVM classifier
import codecs                     # open files with an explicit encoding
import sys
import jieba
import jieba.posseg as pseg
reload(sys)                       # zzh says this trick is bad and should not be used any more!!!  but it really is handy QAQ
sys.setdefaultencoding('utf-8')
import gensim

# Load the word embeddings.
# model = gensim.models.Word2Vec.load("22620491.model")  # for the training/test sets: change the input file and run twice, producing the xlj and csj outputs
model = gensim.models.KeyedVectors.load_word2vec_format('news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
word_vec = model.wv if hasattr(model, 'wv') else model   # keep only the word vectors (a full Word2Vec model wraps them in .wv; a KeyedVectors load already is the vectors)
del model                                                # free the name; only word_vec is used below

ekey = []   # the 35 keywords
keywords = codecs.open("keywords.txt", "r", "utf-8-sig")
lines = keywords.readlines()
for line in lines:
    words = line.split(" ", 6)        # at most 7 keywords per line
    for word in words:
        word = word.replace("\r\n", "")
        print(word)
        ekey.append(word)             # word_vec lookups must be unicode
print(ekey)
for i in ekey:
    word_vec[i]                       # sanity check: raises KeyError if a keyword is missing from the embeddings
# print(word_vec[ekey[0]])
# print("end")
# print(len(ekey))
keywords.close()

smlrt_svm = codecs.open("similarity_svm_csj.txt", "w", "utf-8-sig")

sentence = codecs.open("csj_fenci.txt", "r", "utf-8-sig")   # word-segmented sentences, one per line: "label  w1 w2 ..."
lines = sentence.readlines()
for line in lines:
    stc = []   # words of the current sentence
    e = []     # feature vector: one max similarity per keyword
    if line.split("  ", 1)[1] != "\r\n":      # skip lines with no words after the label
        words = line.split("  ", 1)[1].split(" ")
        for word in words:
            word = word.replace("\r\n", "")
            stc.append(word)
        print(stc)

        for key in ekey:
            maxs = -1
            for i in stc:
                try:
                    s = word_vec.similarity(key, i)   # cosine similarity of keyword vs. sentence word
                    if s > maxs:
                        maxs = s
#                     print(s)
                except KeyError:                      # sentence word not in the embedding vocabulary
                    continue
            if maxs == -1:
                maxs = 0       # no word in the sentence had a vector for this keyword
            e.append(maxs)
        print(e)
        smlrt_svm.write(line.split("  ", 1)[0].split(" ", 1)[0] + " " + str(e) + "\r\n")   # "label [s1, s2, ..., s35]"

sentence.close()
smlrt_svm.close()

Then run multi-class SVM classification on these feature vectors.
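
As a quick, self-contained illustration of the scikit-learn API used below (the toy vectors and labels are made up and are not the project's features):

# Toy one-vs-one multi-class SVM with scikit-learn (made-up data, for illustration only).
from sklearn import svm

X_toy = [[0.9, 0.1], [0.8, 0.2], [0.2, 0.9], [0.1, 0.8], [0.5, 0.5], [0.6, 0.4]]
y_toy = [0, 0, 1, 1, 2, 2]                       # three classes -> 3*2/2 = 3 pairwise SVMs under 'ovo'

toy_clf = svm.SVC(decision_function_shape='ovo')
toy_clf.fit(X_toy, y_toy)
print(toy_clf.predict([[0.85, 0.15]]))           # predicted class label
print(toy_clf.score(X_toy, y_toy))               # mean accuracy on the given data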


xlj = codecs.open("similarity_svm_xlj.txt", "r", "utf-8-sig")   # training features: "label [s1, ..., s35]" per line
# doc = open("res.txt","w")

lines = xlj.readlines()
x = []
y = []

zero = [0] * 35    # an all-zero vector means no word of the sentence had an embedding

for line in lines:
    a = line.split(" ", 1)[1].replace("[", "").replace("]", "").split(",")
    a = [float(v) for v in a]

    if a != zero:                  # drop feature vectors that are all zeros
        x.append(a)
        b = int(line.split(" ", 1)[0])
        y.append(b)

xlj.close()
clf = svm.SVC(decision_function_shape='ovo')   # one-vs-one multi-class SVM
clf.fit(x, y)


# print (len(x))
# print (len(y))
# print (len(a))
x_test = []
y_test = []

csj = codecs.open("similarity_svm_csj.txt", "r", "utf-8-sig")   # test features, same format as the training file
lines = csj.readlines()

print(len(lines))

for line in lines:
    a = line.split(" ", 1)[1].replace("[", "").replace("]", "").split(",")
    a = [float(v) for v in a]

    if a != zero:                  # drop feature vectors that are all zeros
        x_test.append(a)
        b = int(line.split(" ", 1)[0])
        y_test.append(b)


print(len(x_test))
print(len(y_test))
print(x_test)
# y_hat = clf.predict(x)
print(clf.score(x_test, y_test))   # overall classification accuracy


le, ai, nu, jing, wu = 0, 0, 0, 0, 0                    # correct predictions per class (labels 0-4)
sumle, sumai, sumnu, sumjing, sumwu = 0, 0, 0, 0, 0     # test samples per class
for i in range(len(x_test)):
    if y_test[i] == 0:
        sumle += 1
        if clf.predict([x_test[i]])[0] == y_test[i]:
            le += 1
    elif y_test[i] == 1:
        sumai += 1
        if clf.predict([x_test[i]])[0] == y_test[i]:
            ai += 1
    elif y_test[i] == 2:
        sumnu += 1
        if clf.predict([x_test[i]])[0] == y_test[i]:
            nu += 1
    elif y_test[i] == 3:
        sumjing += 1
        if clf.predict([x_test[i]])[0] == y_test[i]:
            jing += 1
    elif y_test[i] == 4:
        sumwu += 1
        if clf.predict([x_test[i]])[0] == y_test[i]:
            wu += 1
print(le / sumle, ai / sumai, nu / sumnu, jing / sumjing, wu / sumwu)   # per-class accuracy for the five classes
print(le, ai, nu, jing, wu)
print(sumle, sumai, sumnu, sumjing, sumwu)




csj.close()
# for line in lines:
#     X=line.split(" ",1)[1].replace("[","").replace("]","").split(",")
#     X = map(eval, X)
#     res=clf.predict([X])
# #     print(clf.predict([X]))  # prediction
#     if clf.predict([X]).tolist()[0] == int(line.split(" ",1)[0]):
#         cnt=cnt+1
#         print(line.split(" ",1)[0])
# print cnt
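
The per-class bookkeeping above can also be obtained in one call with scikit-learn's built-in metrics; a minimal sketch, assuming clf, x_test and y_test have been built as above:

# Optional: per-class precision/recall/F1 in one call (assumes clf, x_test, y_test from above).
from sklearn.metrics import classification_report, accuracy_score

y_pred = clf.predict(x_test)
print(accuracy_score(y_test, y_pred))         # overall accuracy, same as clf.score(x_test, y_test)
print(classification_report(y_test, y_pred))  # precision/recall/F1 for each of the five labels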

 
