[deeplearning-023] word2vec in TensorFlow

1. Main reference

https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/word2vec.py#L70

However, the source code contains a number of errors, and the part that generates the (input word, output word) pairs is not sufficiently commented.

 

2. Source code

# Based mainly on https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/word2vec.py

# About word2vec, see https://zhuanlan.zhihu.com/p/26306795 and http://licstar.net/archives/328
# About skip-gram, see https://www.leiphone.com/news/201706/PamWKpfRFEI42McI.html
"""
word2vec is essentially an auto-encoder.
The input is a one-hot encoded word; the targets are words found before and after the input word in the sentence. All possible
(input word, output word) pairs are collected from the text and used to train a single-hidden-layer neural network in batches.
The weight matrix between the input layer and the hidden layer is the word-vector (embedding) matrix. According to
"Natural Language Processing (Almost) from Scratch", word-vector features can be applied to virtually all NLP tasks.
"""
import collections
import os
import random
import urllib.request
import zipfile

import numpy as np
import tensorflow as tf
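
# A minimal illustrative sketch (not part of the referenced source) of how skip-gram, as described
# in the header comment, turns a sentence into (input word, output word) pairs. With skip_window=1,
# ['the', 'quick', 'brown', 'fox'] yields ('the','quick'), ('quick','the'), ('quick','brown'),
# ('brown','quick'), ('brown','fox'), ('fox','brown'). The helper is only defined for clarity
# and is never called during training.
def _toy_skipgram_pairs(words, skip_window=1):
    pairs = []
    for i, center in enumerate(words):
        # The context is the skip_window words on each side of the center (input) word
        lo = max(0, i - skip_window)
        hi = min(len(words), i + skip_window + 1)
        for j in range(lo, hi):
            if j != i:
                pairs.append((center, words[j]))
    return pairs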

# Training parameters (the original large-scale settings, kept here for reference)
# LEARNING_RATE = 0.1
# BATCH_SIZE = 128
# NUM_STEPS = 3000000
# DISPLAY_STEP = 10000
# EVAL_STEP = 200000

# Training parameters (smaller values for a quick run; these are the ones actually used)
LEARNING_RATE = 0.1
BATCH_SIZE = 128
NUM_STEPS = 30000
DISPLAY_STEP = 1000
EVAL_STEP = 1000

# Words to evaluate against the learned embedding
# eval_words = ['five', 'of', 'going', 'hardware', 'american', 'britain']
eval_words = [b'nine', b'hardware', b'china']

# word2vec parameters
EMBEDDING_SIZE = 200  # Dimension of the embedding vector
MAX_VOCABULARY_SIZE = 50000  # Total number of different words in the vocabulary
MIN_OCCURRENCE = 10  # Remove all words that do not appear at least n times
SKIP_WINDOW = 3  # How many words to consider left and right
NUM_SKIPS = 2  # How many times to reuse an input to generate a label
NUM_SAMPLED = 64  # Number of negative examples to sample

# text8 is one long file containing a single line of text, with paragraph breaks, punctuation, etc. removed
url = 'http://mattmahoney.net/dc/text8.zip'
data_path = 'text8.zip'
if not os.path.exists(data_path):
    print("Downloading the dataset... (It may take some time)")
    filename, _ = urllib.request.urlretrieve(url, data_path)
    print("Done!")
with zipfile.ZipFile(data_path) as f:
    text_words = f.read(f.namelist()[0]).lower().split()

# text_words holds the words of one very long "sentence": a pile of corpus articles with all paragraph breaks and punctuation stripped out.
# print(text_words)
# exit(1)

# Count how many times each word occurs
count = [('UNK', -1)]
# most_common keeps only the MAX_VOCABULARY_SIZE - 1 most frequent words; anything else will later be mapped to 'UNK'
count.extend(collections.Counter(text_words).most_common(MAX_VOCABULARY_SIZE - 1))

# print(count)
# exit(1)

# Drop words that occur fewer than MIN_OCCURRENCE times (count is sorted by frequency, so scan from the tail)
for i in range(len(count) - 1, 0, -1):
    if count[i][1] < MIN_OCCURRENCE:
        count.pop(i)
    else:
        break
vocabulary_size = len(count)

# Map each word to its index (its position in count)
word2id = dict()
for i, (word, _) in enumerate(count):
    word2id[word] = i

# print('the id = ', word2id[b'the'])
# exit(1)

# Convert the ordered text into word ids, stored sequentially in data; i.e. data is the ordered text represented as ids.
data = list()
unk_count = 0
for word in text_words:
    index = word2id.get(word, 0)
    if index == 0:
        unk_count += 1
    data.append(index)

# Update the count of words that fell outside the vocabulary (UNK)
count[0] = ('UNK', unk_count)

# Mapping from id back to word
id2word = dict(zip(word2id.values(), word2id.keys()))

# print(id2word)
# exit(1)

# Index into data, used when generating training batches
data_index = 0

# Generate a batch of training inputs and labels for the skip-gram model
def next_batch(batch_size, num_skips, skip_window):
    print('next batch run\n')
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    print('batch_size = ', batch_size)
    print('num_skips = ', num_skips)
    print('skip_window = ', skip_window)
    # Row vector of input word ids, shape [batch_size]
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    # Column vector of label word ids, shape [batch_size, 1]
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # get window size (words left and right + current one)
    # span is the length of the sliding window taken from the ordered text: the input word plus skip_window words on each side
    span = 2 * skip_window + 1
    print('span = ', span)
    buffer = collections.deque(maxlen=span)
    if data_index + span > len(data):
        data_index = 0
    print('data=', data[data_index:data_index + span])
    # Take span consecutive word ids from the text and keep them in buffer for later use
    buffer.extend(data[data_index:data_index + span])
    print('buffer = ', buffer)
    data_index += span
    # batch_size // num_skips is integer division; the quotient is an integer
    # num_skips is how many times each input word is reused, so the quotient is the number of distinct input words in the batch
    # skip_window is how many words are taken from each side of the input word to form the window
    for i in range(batch_size // num_skips):
        print('i=', i)
        # The word at index skip_window in the span is the input word; all other positions are context words (context_words)
        context_words = [w for w in range(span) if w != skip_window]
        # Since the input word is reused num_skips times, randomly sample num_skips context words to form (input_word, output_word) pairs
        words_to_use = random.sample(context_words, num_skips)
        for j, context_word in enumerate(words_to_use):
            # Input word
            batch[i * num_skips + j] = buffer[skip_window]
            # Output (context) word
            labels[i * num_skips + j, 0] = buffer[context_word]
        # Each iteration advances data_index by one, so the input word and its context window slide forward through the text
        if data_index == len(data):
            buffer.extend(data[0:span])
            data_index = span
        else:
            buffer.append(data[data_index])
            data_index += 1
    # Step back a little so the next call does not skip words and the last words of the data are not ignored
    data_index = (data_index + len(data) - span) % len(data)
    print('batch = ', batch)
    print('labels = ', labels)
    return batch, labels
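
# A quick sanity check (a minimal sketch, not in the referenced source, written in the same
# commented-out style as the other debug prints): uncomment to print one small batch as words
# instead of ids and eyeball the (input, label) pairs.
# batch_x, batch_y = next_batch(8, NUM_SKIPS, SKIP_WINDOW)
# print([id2word[i] for i in batch_x])
# print([id2word[label[0]] for label in batch_y])
# exit(1)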


# Input features: a batch of word ids
X = tf.placeholder(tf.int32, shape=[None])
# Targets: the corresponding context word ids
Y = tf.placeholder(tf.int32, shape=[None, 1])

# Ensure the following ops & var are assigned on CPU
# (some ops are not compatible on GPU)
with tf.device('/cpu:0'):
    # Each word is embedded as a vector of length EMBEDDING_SIZE; embedding every word gives a matrix of shape [vocabulary_size, EMBEDDING_SIZE]
    embedding = tf.Variable(tf.random_normal([vocabulary_size, EMBEDDING_SIZE]))
    # Embedding vectors corresponding to the words in X
    X_embed = tf.nn.embedding_lookup(embedding, X)
    # Per-word weights for the NCE loss
    nce_weights = tf.Variable(tf.random_normal([vocabulary_size, EMBEDDING_SIZE]))
    # Per-word biases for the NCE loss
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

# Compute the average NCE loss for the batch
loss_op = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_weights,
                   biases=nce_biases,
                   labels=Y,
                   inputs=X_embed,
                   num_sampled=NUM_SAMPLED,
                   num_classes=vocabulary_size))

# Define the optimizer
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
train_op = optimizer.minimize(loss_op)

# Evaluation
# Compute the cosine similarity between input data embedding and every embedding vectors
X_embed_norm = X_embed / tf.sqrt(tf.reduce_sum(tf.square(X_embed)))
embedding_norm = embedding / tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keepdims=True))
cosine_sim_op = tf.matmul(X_embed_norm, embedding_norm, transpose_b=True)

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # Run the initializer
    sess.run(init)

    # Testing data
    x_test = np.array([word2id[w] for w in eval_words])

    average_loss = 0
    for step in range(1, NUM_STEPS + 1):
        # Get a new batch of data
        batch_x, batch_y = next_batch(BATCH_SIZE, NUM_SKIPS, SKIP_WINDOW)
        # Run training op
        _, loss = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
        average_loss += loss

        if step % DISPLAY_STEP == 0 or step == 1:
            if step > 1:
                average_loss /= DISPLAY_STEP
            print("Step " + str(step) + ", Average Loss= " + \
                  "{:.4f}".format(average_loss))
            average_loss = 0

        # Evaluation
        if step % EVAL_STEP == 0 or step == 1:
            print("Evaluation...")
            sim = sess.run(cosine_sim_op, feed_dict={X: x_test})
            for i in range(len(eval_words)):
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = '"%s" nearest neighbors:' % eval_words[i]
                for k in range(top_k):
                    log_str = '%s %s,' % (log_str, id2word[nearest[k]])
                print(log_str)
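
    # A minimal sketch (not in the referenced source): after training, the embedding matrix itself
    # holds the word vectors described in the header comment; pulling it out as a numpy array lets
    # you look up the vector of any single word.
    final_embedding = sess.run(embedding)  # shape [vocabulary_size, EMBEDDING_SIZE]
    # nine_vector = final_embedding[word2id[b'nine']]  # hypothetical lookup; assumes 'nine' is in the vocabulary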

 
