# -*- coding: utf-8 -*-
"""
Basic word2vec example
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import zipfile
import tensorflow as tf
import numpy as np
from six.moves import urllib
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'


def maybe_download(filename, expected_bytes):
    """
    Download a file if not present, and make sure it is the right size.
    :param filename:
    :param expected_bytes:
    :return:
    """
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    statinfo = os.stat(filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename
filename = maybe_download('../../data/corpus/text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
    """
    Extract the first file enclosed in a zip file as a list of words.
    :param filename:
    :return:
    """
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
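# Quick peek at the corpus (illustrative, not in the original script): text8 is a
# single lowercase, whitespace-separated text, so the first tokens should look
# like ordinary English words.
print('First 5 words:', vocabulary[:5])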
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000


def build_dataset(words, n_words):
    """
    Process raw input into a dataset.
    Builds the training data and the word-to-id lookup tables: 'data' is fed to
    the model during training, while 'dictionary' / 'reversed_dictionary'
    translate between words and the indices of their embedding vectors.
    :param words: a list of words
    :param n_words: the number of words to keep in the vocabulary
    :return:
        'data': a list, [index1, index2, index3, ...], the index of every word;
            an index is the word's frequency rank, and the index of 'UNK' is zero
        'count': a list, [['UNK', count], (word1, count1), (word2, count2), ...],
            the n_words most common words
        'dictionary': {word1: index1, word2: index2, word3: index3, ...},
            the n_words most common words
        'reversed_dictionary': a dict, the inverse mapping of 'dictionary'
    """
    # Count word frequencies
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    # Map each word to its frequency rank: word as key, rank as value
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    # Build the training data: each entry is the rank of the corresponding word
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary
data, count, dictionary, reversed_dictionary = build_dataset(vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reversed_dictionary[i] for i in data[:10]])
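# Illustrative sanity check (a sketch, not part of the original script): on a toy
# corpus the mapping is easy to inspect -- index 0 is reserved for 'UNK' and more
# frequent words get smaller indices.
toy_data, toy_count, toy_dict, _ = build_dataset(['the', 'cat', 'sat', 'on', 'the', 'mat'], 4)
print('Toy dictionary:', toy_dict)  # e.g. {'UNK': 0, 'the': 1, ...}
print('Toy data:', toy_data)        # word ids; out-of-vocabulary words map to 0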
data_index = 0


# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """
    Generate word2vec training samples.
    :param batch_size: number of samples per batch
    :param num_skips: How many times to reuse an input to generate a label
    :param skip_window: How many words to consider left and right
    :return:
        'batch': np.ndarray of shape (batch_size,), the ids of the center words
        'labels': np.ndarray of shape (batch_size, 1), the ids of the context words
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size,), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # window length: [skip_window target skip_window]
    buffer = collections.deque(maxlen=span)  # a deque holding the ids of the current window
    # Initialize the buffer with the first span words
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Slide the window to collect the batch
    for i in range(batch_size // num_skips):  # how many windows fit in a batch
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]  # never pick the middle word itself
        for j in range(num_skips):
            while target in targets_to_avoid:  # pick each context word at most once
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])  # slide the window: append the next word
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reversed_dictionary[batch[i]], '->', labels[i, 0], reversed_dictionary[labels[i, 0]])
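# Worked example (illustrative): with skip_window=1 and num_skips=2, each center
# word produces two (input, label) pairs drawn from its immediate neighbours, so
# a fragment "w1 w2 w3" yields the pairs (w2 -> w1) and (w2 -> w3) before the
# window slides one word to the right.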
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector
skip_window = 3  # How many words to consider left and right
num_skips = 4  # How many times to reuse an input to generate a label

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on
valid_window = 100  # Only pick dev samples in the head of the distribution
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64  # Number of negative examples to sample
graph = tf.Graph()
with graph.as_default():
    # Input data
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        # Construct the variables for the NCE loss
        nce_weights = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
    # Compute the average NCE loss for the batch.
    # tf.nn.nce_loss automatically draws a new sample of the negative labels
    # each time we evaluate the loss.
    loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,
                                         biases=nce_biases,
                                         labels=train_labels,
                                         inputs=embed,
                                         num_sampled=num_sampled,
                                         num_classes=vocabulary_size))
    optimizer = tf.train.GradientDescentOptimizer(0.8).minimize(loss)
    # Compute the cosine similarity between minibatch examples and all
    # embeddings; with unit-norm rows this reduces to a matrix product.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
    # Add variable initializer
    init = tf.global_variables_initializer()
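# Background note on the loss (added commentary): a full softmax would require
# computing vocabulary_size logits per step. tf.nn.nce_loss instead contrasts the
# true context word against num_sampled randomly drawn negative words, which
# approximates the softmax gradient at a fraction of the cost;
# tf.nn.sampled_softmax_loss would be a reasonable drop-in alternative here.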
# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
    init.run()
    print('Initialized')
    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        # Perform one update step by evaluating the optimizer op
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000  # an estimate of the loss over the last 2000 batches
            print('Average loss at step ', step, ': ', average_loss)
            average_loss = 0
        # Note that this is expensive (~20% slowdown if computed every 500 steps).
        # Compute the similarity between the validation words and all words, and
        # show the 8 words most similar to each validation word.
        if step % 10000 == 0:
            sim = similarity.eval()  # tf.Tensor.eval(feed_dict=None, session=None)
            for i in range(valid_size):
                valid_word = reversed_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]  # negate for descending order; skip the word itself
                log_str = 'Nearest to %s:' % valid_word
                for k in range(top_k):
                    close_word = reversed_dictionary[nearest[k]]
                    log_str = '%s %s,' % (log_str, close_word)
                print(log_str)
    # Evaluate the normalized embeddings once training is done
    final_embeddings = normalized_embeddings.eval()
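# Post-training helper (a sketch beyond the original script): because the rows of
# final_embeddings are unit-normalized, cosine similarity is a plain dot product,
# so nearest neighbours can be queried with numpy alone, no session required.
def nearest_neighbors(word, top_k=8):
    idx = dictionary.get(word, 0)  # fall back to 'UNK' for out-of-vocabulary words
    sims = np.dot(final_embeddings, final_embeddings[idx])
    return [reversed_dictionary[j] for j in (-sims).argsort()[1:top_k + 1]]

print('Nearest to "three":', nearest_neighbors('three'))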
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
    plt.figure(figsize=(18, 18))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)

# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])  # reduce to 2 dimensions
labels = [reversed_dictionary[i] for i in range(plot_only)]
plot_with_labels(low_dim_embs, labels)
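# Optional follow-up (a sketch beyond the original script): persist the trained
# embeddings and the vocabulary so they can be reloaded without retraining.
np.save('final_embeddings.npy', final_embeddings)
with open('vocabulary.txt', 'w') as f:
    for i in range(vocabulary_size):
        f.write(reversed_dictionary[i] + '\n')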