Implementing word2vec with TensorFlow

This post walks through implementing a Word2Vec model with Python and TensorFlow, covering data preprocessing, building the word embeddings, defining the loss function, and the training loop, and finishes with a qualitative evaluation on a few test words.
import collections
import os
import random
import urllib
import zipfile
import numpy as np
import tensorflow as tf

# Training parameters
learning_rate = 0.1
batch_size = 128
num_steps = 3000000
display_step = 10000
eval_step = 200000

# Words used for qualitative evaluation during training
eval_words = ['nine', 'of', 'going', 'hardware', 'american', 'britain']

# Word2Vec parameters
embedding_size = 200 # dimensionality of the word vectors
max_vocabulary_size = 50000 # maximum number of words kept in the vocabulary
min_occurrence = 10 # minimum word frequency
skip_window = 3 # number of context words on each side of the center word
num_skips = 2 # how many (input, label) pairs to generate per window
num_sampled = 64 # number of negative samples

# Load the training data (any plain-text corpus would work)
data_path = 'text8.zip'
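# (Optional) A minimal sketch, not part of the original post, for fetching the corpus when
# text8.zip is not already on disk; the download URL is an assumption (the commonly used text8 mirror).
if not os.path.exists(data_path):
    import urllib.request  # imported here so the sketch stays self-contained
    urllib.request.urlretrieve('http://mattmahoney.net/dc/text8.zip', data_path)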
with zipfile.ZipFile(data_path) as f:
    text_words = f.read(f.namelist()[0]).lower().split()

print(len(text_words))

# Build a counter that records how many times each word occurs
count = [('UNK', -1)]
# Keep the max_vocabulary_size most frequent words
count.extend(collections.Counter(text_words).most_common(max_vocabulary_size - 1))

# most_common already returns the words sorted by descending frequency
print(count[0:10])

# Remember that we also set a min_occurrence parameter: every word has to meet that threshold.
# Drop words that occur fewer than min_occurrence times.
for i in range(len(count) - 1, -1, -1):  # walk backwards from the least frequent word
    if count[i][1] < min_occurrence:
        count.pop(i)
    else:
        # the list is sorted by descending frequency, so every remaining word meets the threshold
        break

### Word-to-ID mapping
# Vocabulary size after filtering
vocabulary_size = len(count)
# Assign each word an integer ID
word2id = dict()
for i, (word, _) in enumerate(count):
    word2id[word] = i

print(word2id)

# Convert every word in the corpus to its ID

data = list()
unk_count = 0
for word in text_words:
    # map the word to its ID; unknown words map to 0 (UNK)
    index = word2id.get(word, 0)
    if index == 0:
        unk_count += 1
    data.append(index)
count[0] = ('UNK', unk_count)
id2word = dict(zip(word2id.values(), word2id.keys()))

print("Words count:", len(text_words))
print("Unique words:", len(set(text_words)))
print("Vocabulary size:", vocabulary_size)
print("Most common words:", count[:10])

# Build the skip-gram training batches

data_index = 0

def next_batch(batch_size, num_skips, skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # get window size (words left and right + current one).
    span = 2 * skip_window + 1  # window size is 7: 3 words left, 3 right, plus the center word
    buffer = collections.deque(maxlen=span)  # a queue holding the current window of span words
    if data_index + span > len(data):  # wrap around once we have slid past the end of the corpus
        data_index = 0
    buffer.extend(data[data_index:data_index + span])  # the deque holds the current window, e.g. deque([5234, 3081, 12, 6, 195, 2, 3134], maxlen=7)
    data_index += span
    for i in range(batch_size // num_skips):  # num_skips is how many context words to use as labels per window (2 here)
        context_words = [w for w in range(span) if w != skip_window]  # context positions are [0, 1, 2, 4, 5, 6]
        words_to_use = random.sample(context_words, num_skips)  # randomly pick num_skips context positions
        for j, context_word in enumerate(words_to_use):  # each picked context word becomes one label
            batch[i * num_skips + j] = buffer[skip_window]  # the input is always the center word of the window
            labels[i * num_skips + j, 0] = buffer[context_word]  # the label is the sampled context word
        if data_index == len(data):
            buffer.extend(data[0:span])
            data_index = span
        else:
            buffer.append(data[data_index])  # slide the window one word to the right, e.g. [5234, 3081, 12, 6, 195, 2, 3134] -> [3081, 12, 6, 195, 2, 3134, 46]
            data_index += 1

    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
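# Illustrative check (a sketch, not part of the original post): draw one small batch and print
# a few (center word -> context word) pairs to verify the skip-gram pairing looks reasonable.
# The batch size of 8 here is arbitrary.
demo_batch, demo_labels = next_batch(8, num_skips, skip_window)
for center_id, context_id in zip(demo_batch, demo_labels[:, 0]):
    print(id2word[center_id], '->', id2word[context_id])
data_index = 0  # reset the global cursor so training starts from the beginning of the corpus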

with tf.device('/cpu:0'):
    embedding = tf.Variable(tf.random.normal([vocabulary_size, embedding_size]))  # shape: vocabulary_size x embedding_size (e.g. 47135 x 200)
    nce_weights = tf.Variable(tf.random.normal([vocabulary_size, embedding_size]))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
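# Note: word2vec keeps two sets of parameters. `embedding` holds the input (center-word) vectors
# that are trained and later evaluated, while `nce_weights` and `nce_biases` parameterize the
# output side and are only used inside the NCE loss.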

# tf.nn.embedding_lookup maps word IDs to their embedding vectors

def get_embedding(x):
    with tf.device('/cpu:0'):
        x_embed = tf.nn.embedding_lookup(embedding, x)
        return x_embed
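# Illustrative shape check (a sketch; the IDs below are arbitrary): embedding_lookup maps a
# vector of word IDs to a matrix with one embedding vector per ID.
print(get_embedding(np.array([1, 2, 3, 4], dtype=np.int32)).shape)  # -> (4, 200)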

# Loss definition
# NCE first computes logits and labels for the true context word and the sampled negative words,
# then scores them with sigmoid cross entropy.

def nce_loss(x_embed, y):
    with tf.device('/cpu:0'):
        y = tf.cast(y, tf.int64)
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=y,
                           inputs=x_embed,
                           num_sampled=num_sampled,  # number of negative samples to draw
                           num_classes=vocabulary_size))
        return loss

# Evaluation: cosine similarity between query embeddings and every word embedding
def evaluate(x_embed):
    with tf.device('/cpu:0'):
        # Compute the cosine similarity between input data embedding and every embedding vectors
        x_embed = tf.cast(x_embed, tf.float32)
        x_embed_norm = x_embed / tf.sqrt(tf.reduce_sum(tf.square(x_embed)))  # normalize the query embeddings
        embedding_norm = embedding / tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keepdims=True))  # normalize every embedding vector
        cosine_sim_op = tf.matmul(x_embed_norm, embedding_norm, transpose_b=True)  # cosine similarity
        return cosine_sim_op
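# Shape note (illustrative): for n query embeddings, evaluate() returns an [n, vocabulary_size]
# matrix of cosine similarities; sorting each row in descending order yields that query word's
# nearest neighbors, which is how it is used in the training loop below.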

# SGD
optimizer = tf.optimizers.SGD(learning_rate)

# One optimization step
def run_optimization(x, y):
    with tf.device('/cpu:0'):
        with tf.GradientTape() as g:
            emb = get_embedding(x)
            loss = nce_loss(emb, y)

        # compute gradients
        gradients = g.gradient(loss, [embedding, nce_weights, nce_biases])

        # apply the update
        optimizer.apply_gradients(zip(gradients, [embedding, nce_weights, nce_biases]))


# IDs of the test words (the vocabulary keys are bytes, since the zip file was read in binary mode)
x_test = np.array([word2id[w.encode('utf-8')] for w in eval_words])

# Training loop
for step in range(1, num_steps + 1):
    batch_x, batch_y = next_batch(batch_size, num_skips, skip_window)
    run_optimization(batch_x, batch_y)

    if step % display_step == 0 or step == 1:
        loss = nce_loss(get_embedding(batch_x), batch_y)
        print("step: %i, loss: %f" % (step, loss))

    # Evaluation.
    if step % eval_step == 0 or step == 1:
        print("Evaluation...")
        sim = evaluate(get_embedding(x_test)).numpy()
        for i in range(len(eval_words)):
            top_k = 8  # number of nearest neighbors to report
            nearest = (-sim[i, :]).argsort()[1:top_k + 1]
            log_str = '"%s" nearest neighbors:' % eval_words[i]
            for k in range(top_k):
                log_str = '%s %s,' % (log_str, id2word[nearest[k]])
            print(log_str)
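# (Optional) A minimal sketch, not part of the original post, for persisting the learned
# embeddings once training finishes; the file name is an assumption.
np.save('word2vec_embedding.npy', embedding.numpy())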
