word2vec

import numpy as np
import tensorflow as tf
from gensim.models import word2vec  # the word2vec module (.py file)
from gensim.models import Word2Vec  # the Word2Vec class
from gensim.models import KeyedVectors
from tensorflow.python.ops.rnn import dynamic_rnn
import re


def clean_str(string):  # strip invalid characters from the text and split into word tokens
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\,", "", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
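
# A quick sanity check of clean_str (hypothetical input sentence):
# clean_str("I didn't like it, really!")
# -> "i did n't like it , really !"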

# define variables (file paths)
fan = r'C:\Users\Administrator\Desktop\again\data\rt-polarity.neg'    # negative reviews
new_text = r'C:\Users\Administrator\Desktop\again\data\fan.txt'       # cleaned output file
zheng = r'C:\Users\Administrator\Desktop\again\data\rt-polarity.pos'  # positive reviews
list_label = []

# write the negative samples
list_false = open(fan, encoding='utf-8').readlines()
with open(new_text, 'w', encoding='utf-8') as f1:
    out = 40000   # cap on the number of lines written
    count = 0
    for i in list_false:
        a = clean_str(i)
        f1.write(a + '\n')
        list_label.append([0, 1])   # one-hot label for the negative class
        count += 1
        if count == out:
            break
# append the positive samples
list_true = open(zheng, encoding='utf-8').readlines()
with open(new_text, 'a', encoding='utf-8') as f1:
    out = 40000
    count = 0
    for i in list_true:
        a = clean_str(i)
        f1.write(a + '\n')
        list_label.append([1, 0])   # one-hot label for the positive class
        count += 1
        if count == out:
            break
list_label=np.array(list_label)
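
# Optional sanity check: rt-polarity ships 5331 sentences per class, so both
# loops finish well under the 40000 cap.
# print(list_label.shape)   # expected (10662, 2)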



# sentences = word2vec.Text8Corpus(u'C:/Users/Administrator/Desktop/again/data/fan.txt')
# print(sentences)

# train the model; some of the parameters:
# model = word2vec.Word2Vec(sentences, size=100, hs=1, min_count=1, window=3)

# save
# model.save(u'C:/Users/Administrator/Desktop/again/day/file')
# model.save(u'C:/Users/Administrator/Desktop/again/day/file/text.model')
# model.save_word2vec_format(u'C:/Users/Administrator/Desktop/again/day/file/text.model')

# load
# m = KeyedVectors.load_word2vec_format('C:/Users/Administrator/Desktop/again/day/file/text.model', encoding='utf-8', binary=False)
# model = word2vec.Word2Vec()
# m1 = model.wv.load_word2vec_format(u"C:/Users/Administrator/Desktop/again/day/file/text.model", binary=False)
m2 = word2vec.Word2Vec.load(u"C:/Users/Administrator/Desktop/again/day/file/text.model")  # equivalent to the loaders above
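
# Optional sanity checks on the loaded model (assumes it was trained with size=100):
# print(m2.wv.vector_size)          # expected: 100
# print('good' in m2.wv.vocab)      # True if 'good' appeared in the training corpus
# print(m2.most_similar('good', topn=3))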



# compute the similarity between two word vectors
try:
    print('----------------- divider ----------------------------')
    sim1 = m2.similarity(u'good', u'bad')
    sim2 = m2.similarity(u'goods', u'goods')
except KeyError:   # a word missing from the vocabulary
    sim1 = 0
    sim2 = 0
print('similarity between good and bad: ', sim1)
print('similarity between goods and goods: ', sim2)


print('----------------- divider ----------------------------')
list_hang = open(new_text, 'r', encoding='utf-8').readlines()
# x_data = [[list(m2[a]) for a in i.split(' ')] for i in list_hang]
max_length = max(len(i.split(' ')) for i in list_hang)   # longest sentence, in tokens
print(max_length)

x_data = []
for i in list_hang:
    words = i.split(' ')
    list_word = []
    for ia in range(max_length):
        try:
            word = words[ia]
            list_word.append(list(m2[word]))   # 100-d word vector
        except (IndexError, KeyError):         # past the sentence end, or OOV word
            list_word.append(np.zeros(100))    # pad with a zero vector
    x_data.append(list_word)
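
# Every sentence is now padded with zero vectors up to max_length rows of 100-d
# vectors, so x_data can be stacked into a dense (num_sentences, max_length, 100) array.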



np.random.seed(222)
order = np.random.permutation(len(x_data))   # shuffle samples and labels together
x_data = np.array(x_data)[order]
list_label = list_label[order]

print(list_label.shape)
print(x_data.shape)   # (10662, 53, 100)


n_classes = 2         # positive / negative
embedding_dim = 100   # word2vec vector size
hidden_size = 200     # LSTM hidden units

training_epochs = 10
learning_rate = 0.1
batch_size = 200


def mini_batch(bit_size):   # return the next slice of bit_size samples
    global index
    xx = x_data[index:index + bit_size]
    yy = list_label[index:index + bit_size]
    index += bit_size
    return xx, yy
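
# Usage sketch: reset index to 0 at the start of each epoch, then call
# mini_batch(batch_size) repeatedly; each call advances the global cursor.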


X = tf.placeholder(tf.float32, shape=[None, max_length, embedding_dim])
Y = tf.placeholder(tf.int64, shape=[None, n_classes])

cell01 = tf.contrib.rnn.LSTMCell(hidden_size)
# cell02 = tf.contrib.rnn.LSTMCell(hidden_size)
# mrt = tf.contrib.rnn.MultiRNNCell([cell01, cell02])
outputs, _states = dynamic_rnn(cell01, X, dtype=tf.float32)
print(outputs.shape)         # (batch, max_length, hidden_size)
print(outputs[:, -1].shape)  # (batch, hidden_size)
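
# dynamic_rnn returns the hidden state at every time step; only the state at the
# last step, outputs[:, -1], is kept as a fixed-size summary of the whole sentence.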

# fully connected projection from the last hidden state to the class logits
# outputs = tf.reshape(outputs, [-1, hidden_size])
logits = tf.contrib.layers.fully_connected(outputs[:, -1], n_classes, activation_fn=None)

prediction = tf.argmax(logits, 1)

# loss
# logits = tf.reshape(logits, [batch_size, max_length, n_classes])
# weight = tf.ones([batch_size, max_length])
# cost = tf.reduce_mean(tf.contrib.seq2seq.sequence_loss(logits, Y, weight))
# cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=Y))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
train = tf.train.AdamOptimizer(learning_rate).minimize(cost)

accuracy = tf.reduce_mean(tf.to_float(tf.equal(prediction, tf.argmax(Y, 1))))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(training_epochs):
        index = 0
        avg_cost = 0
        num_batches = int(len(x_data) // batch_size)
        for a in range(num_batches):
            xx, yy = mini_batch(batch_size)
            loss, acc, _ = sess.run([cost, accuracy, train], feed_dict={X: xx, Y: yy})
            avg_cost += loss / num_batches
        print(i + 1, avg_cost, acc)   # epoch, mean loss, accuracy of the last batch
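
    # (Hypothetical follow-up) inspect a few predictions while the session is open:
    # preds = sess.run(prediction, feed_dict={X: x_data[:5]})
    # print(preds, np.argmax(list_label[:5], axis=1))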