TensorFlow (7): word2vec embedding

# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import math
import collections
import pickle as pkl
from pprint import pprint
#from pymongo import MongoClient
import re
import jieba
import os.path as path
import os

# step 1: read the stop-word list
stop_words = []
with open('stop_words.txt', encoding='utf-8') as f:
    line = f.readline()
    while line:
        stop_words.append(line[:-1])  # drop the trailing newline
        line = f.readline()
stop_words = set(stop_words)
print('Finished reading stop words: {n} in total'.format(n=len(stop_words)))

# step 2: read the corpus, clean it, tokenise, and build the vocabulary
raw_word_list = []  # every token kept from the corpus
sentence_list = []  # one list of tokens per line of the file
with open('2800.txt', encoding='gbk') as f:
    line = f.readline()
    while line:
        # strip newline characters and spaces (replace removes every occurrence)
        line = line.replace('\n', '').replace(' ', '')
        if len(line) > 0:  # skip empty lines
            raw_words = list(jieba.cut(line, cut_all=False))
            dealed_words = []
            for word in raw_words:
                if word not in stop_words and word not in ['qingkan520','www','com','http']:
                    raw_word_list.append(word)
                    dealed_words.append(word)
            sentence_list.append(dealed_words)
        line = f.readline()
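
# Illustrative aside (not part of the original script): jieba's precise mode,
# used above, segments a phrase into non-overlapping tokens.
print(list(jieba.cut('今天打篮球', cut_all=False)))  # expected: ['今天', '打', '篮球']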

# Counter turns the token list into a word -> frequency mapping
word_count = collections.Counter(raw_word_list)
print('The corpus contains {n1} tokens and {n2} distinct words; keeping the top 30000 for the vocabulary'
      .format(n1=len(raw_word_list), n2=len(word_count)))

# keep the 30000 most frequent words
word_count = word_count.most_common(30000)
word_list = [x[0] for x in word_count]
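
# Illustrative aside (not from the original post): Counter.most_common returns
# (word, count) pairs sorted by descending frequency, so word_list above holds
# the vocabulary ordered from most to least frequent.
demo_count = collections.Counter(['a', 'b', 'a', 'c', 'a', 'b'])
print(demo_count.most_common(2))  # [('a', 3), ('b', 2)]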

class word2vec():
    def __init__(self,
                 vocab_list=None,
                 embedding_size=200,
                 win_len=3, # one-sided context window (3 words to the left, 3 to the right)
                 num_sampled=1000, # number of negative samples
                 learning_rate=1.0,
                 logdir='/tmp/simple_word2vec',
                 model_path= None
                 ):

        # basic model parameters
        self.batch_size     = None # examples per batch; decided per training call
        if model_path is not None:
            self.load_model(model_path)
        else:
            # model parameters
            # vocab_list must be a list, otherwise fail fast
            assert type(vocab_list)==list
            self.vocab_list     = vocab_list
            self.vocab_size     = len(vocab_list)
            self.embedding_size = embedding_size
            self.win_len        = win_len
            self.num_sampled    = num_sampled
            self.learning_rate  = learning_rate
            self.logdir         = logdir

            self.word2id = {}   # word => id mapping
            for i in range(self.vocab_size):
                self.word2id[self.vocab_list[i]] = i

            # training counters
            self.train_words_num = 0 # number of (input, label) word pairs trained on
            self.train_sents_num = 0 # number of sentences trained on
            self.train_times_num = 0 # number of training steps (one step may cover several sentences)

            # training-loss records
            self.train_loss_records = collections.deque(maxlen=10) # last 10 loss values
            self.train_loss_k10 = 0

        self.build_graph()
        self.init_op()
        if model_path is not None:
            tf_model_path = os.path.join(model_path,'tf_vars')
            self.saver.restore(self.sess,tf_model_path)

    def init_op(self):
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(self.init)
        self.summary_writer = tf.summary.FileWriter(self.logdir, self.sess.graph)

    # the TensorFlow computation graph
    def build_graph(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            # inputs are centre-word ids and labels are context-word ids:
            # e.g. in '今天 打 篮球', input '打' can be paired with label '篮球'
            self.train_inputs = tf.placeholder(tf.int32, shape=[self.batch_size])
            self.train_labels = tf.placeholder(tf.int32, shape=[self.batch_size, 1])
            self.embedding_dict = tf.Variable(
                # one embedding_size-dimensional vector per vocabulary word
                tf.random_uniform([self.vocab_size,self.embedding_size],-1.0,1.0)
            )
            # [vocab_size, embedding_size] weight matrix for NCE
            self.nce_weight = tf.Variable(tf.truncated_normal([self.vocab_size, self.embedding_size],
                                                              stddev=1.0/math.sqrt(self.embedding_size)))
            self.nce_biases = tf.Variable(tf.zeros([self.vocab_size]))

            # look up the embedding vector of each input id
            embed = tf.nn.embedding_lookup(self.embedding_dict, self.train_inputs) # [batch_size, embedding_size]

            # NCE loss
            self.loss = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights = self.nce_weight,
                    biases = self.nce_biases,
                    labels = self.train_labels,
                    inputs = embed,
                    num_sampled = self.num_sampled, # number of negative samples
                    num_classes = self.vocab_size  # total number of classes (the vocabulary size)
                )
            )

            # TensorBoard: record the loss
            tf.summary.scalar('loss',self.loss)

            # update the gradients and embeddings according to the NCE loss
            self.train_op = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)

            # similarity against a given set of test words
            self.test_word_id = tf.placeholder(tf.int32,shape=[None])

            # L2 norm of every word vector
            vec_l2_model = tf.sqrt(
                tf.reduce_sum(tf.square(self.embedding_dict),1,keep_dims=True)
            )

            avg_l2_model = tf.reduce_mean(vec_l2_model)
            tf.summary.scalar('avg_vec_model',avg_l2_model)

            # normalised vectors: each word vector divided by its L2 norm
            self.normed_embedding = self.embedding_dict / vec_l2_model

            # look up the normalised embedding rows for test_word_id
            test_embed = tf.nn.embedding_lookup(self.normed_embedding, self.test_word_id)

            # with unit-norm rows, this matmul yields the cosine similarity
            # between each test word and every word in the vocabulary
            self.similarity = tf.matmul(test_embed, self.normed_embedding, transpose_b=True)

            # variable initialisation
            self.init = tf.global_variables_initializer()

            self.merged_summary_op = tf.summary.merge_all()

            self.saver = tf.train.Saver()

    def train_by_sentence(self, input_sentence=[]):
        # input_sentence: [sub_sent1, sub_sent2, ...]
        # each sub_sent is a sequence of words, e.g. ['这次','大选','让']
        # one call trains on one batch built from all the given sentences
        batch_inputs = []
        batch_labels = []
        for sent in input_sentence:
            for i in range(len(sent)):
                # sliding window: the words up to win_len positions to the left
                # and right of position i serve as context for the word at i
                start = max(0,i-self.win_len)
                end = min(len(sent),i+self.win_len+1)
                # walk over the window
                for index in range(start,end):
                    if index == i:
                        continue
                    input_id = self.word2id.get(sent[i])
                    label_id = self.word2id.get(sent[index])
                    # skip out-of-vocabulary words; compare against None explicitly,
                    # since id 0 (the most frequent word) is falsy
                    if input_id is None or label_id is None:
                        continue
                    # add the pair to the batch
                    batch_inputs.append(input_id)
                    batch_labels.append(label_id)
        if len(batch_inputs)==0:
            return

        # reshape the labels into a column vector
        batch_inputs = np.array(batch_inputs,dtype=np.int32)
        batch_labels = np.array(batch_labels,dtype=np.int32)
        batch_labels = np.reshape(batch_labels,[len(batch_labels),1])

        feed_dict = {
            self.train_inputs: batch_inputs,
            self.train_labels: batch_labels
        }
        _, loss_val, summary_str = self.sess.run([self.train_op,self.loss,self.merged_summary_op], feed_dict=feed_dict)

        # train loss: running mean over the last 10 batches
        self.train_loss_records.append(loss_val)
        self.train_loss_k10 = np.mean(self.train_loss_records)

        if self.train_sents_num % 10000 == 0 :
            self.summary_writer.add_summary(summary_str, self.train_sents_num)
            print("{a} sentences dealt with, loss: {b}"
                  .format(a=self.train_sents_num,b=self.train_loss_k10))

        # training counters
        self.train_words_num += len(batch_inputs)
        self.train_sents_num += len(input_sentence)
        self.train_times_num += 1

    def cal_similarity(self,test_word_id_list,top_k=10):
        sim_matrix = self.sess.run(self.similarity, feed_dict={self.test_word_id:test_word_id_list})
        sim_mean = np.mean(sim_matrix)
        sim_var = np.mean(np.square(sim_matrix-sim_mean))
        test_words = []
        near_words = []
        for i in range(len(test_word_id_list)):
            test_words.append(self.vocab_list[test_word_id_list[i]])
            # take the top_k most similar words, skipping rank 0 (the word itself)
            nearst_id = (-sim_matrix[i,:]).argsort()[1:top_k+1]
            nearst_word = [self.vocab_list[x] for x in nearst_id]
            near_words.append(nearst_word)
        return test_words,near_words,sim_mean,sim_var

    def save_model(self, save_path):

        if os.path.isfile(save_path):
            raise RuntimeError('the save path should be a dir')
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        # record the model parameters
        model = {}
        var_names = ['vocab_size',      # int       model parameters
                     'vocab_list',      # list
                     'learning_rate',   # float
                     'word2id',         # dict
                     'embedding_size',  # int
                     'logdir',          # str
                     'win_len',         # int
                     'num_sampled',     # int
                     'train_words_num', # int       train info
                     'train_sents_num', # int
                     'train_times_num', # int
                     'train_loss_records',  # deque train loss
                     'train_loss_k10',  # float
                     ]
        for var in var_names:
            model[var] = getattr(self, var)

        param_path = os.path.join(save_path,'params.pkl')
        if os.path.exists(param_path):
            os.remove(param_path)
        with open(param_path,'wb') as f:
            pkl.dump(model,f)

        # save the TensorFlow variables; Saver.save overwrites any existing
        # checkpoint files with the same prefix
        tf_path = os.path.join(save_path,'tf_vars')
        self.saver.save(self.sess,tf_path)

    def load_model(self, model_path):
        if not os.path.exists(model_path):
            raise RuntimeError('model path does not exist')
        param_path = os.path.join(model_path,'params.pkl')
        with open(param_path,'rb') as f:
            model = pkl.load(f)
            self.vocab_list = model['vocab_list']
            self.vocab_size = model['vocab_size']
            self.logdir = model['logdir']
            self.word2id = model['word2id']
            self.embedding_size = model['embedding_size']
            self.learning_rate = model['learning_rate']
            self.win_len = model['win_len']
            self.num_sampled = model['num_sampled']
            self.train_words_num = model['train_words_num']
            self.train_sents_num = model['train_sents_num']
            self.train_times_num = model['train_times_num']
            self.train_loss_records = model['train_loss_records']
            self.train_loss_k10 = model['train_loss_k10']
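
# To make the sliding-window pairing in train_by_sentence concrete, here is a
# minimal standalone sketch (a hypothetical helper, not part of the original
# class) that applies the same pairing rule to a toy sentence:
def skip_gram_pairs(sent, win_len):
    # every word within win_len positions of the centre word at i
    # becomes one (input, label) pair
    pairs = []
    for i in range(len(sent)):
        start = max(0, i - win_len)
        end = min(len(sent), i + win_len + 1)
        for index in range(start, end):
            if index != i:
                pairs.append((sent[i], sent[index]))
    return pairs

print(skip_gram_pairs(['这次', '大选', '让'], win_len=1))
# [('这次', '大选'), ('大选', '这次'), ('大选', '让'), ('让', '大选')]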


# build the model
w2v = word2vec(vocab_list=word_list,    # the vocabulary
               embedding_size=200,
               win_len=2,
               learning_rate=1,
               num_sampled=100,         # 100 negative samples per training step
               logdir='/tmp/280')       # TensorBoard log directory

# training
num_steps = 60000
for i in range(num_steps):
    if i % 10000 == 0:
        print(i)
    # train one sentence per step, cycling through the corpus
    sent = sentence_list[i % len(sentence_list)]
    w2v.train_by_sentence([sent])

w2v.save_model('model')

# reload the model from disk; passing model_path restores both the pickled
# parameters and the TensorFlow variables (see __init__)
w2v = word2vec(model_path='model')
test_word = ['少女','少年']
test_id = [word_list.index(x) for x in test_word]
test_words,near_words,sim_mean,sim_var = w2v.cal_similarity(test_id)
print(test_words,near_words,sim_mean,sim_var)
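
# Illustrative follow-up (not in the original post): the L2-normalised
# embedding matrix defined in build_graph can be fetched as a NumPy array
# for use outside TensorFlow.
embedding_matrix = w2v.sess.run(w2v.normed_embedding)
print(embedding_matrix.shape)  # (vocab_size, embedding_size)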
