Code:
# encoding=utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Import packages
import collections
import math
import os
import random
import zipfile

import numpy as np
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
Code:
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'

# Download the dataset
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    # Get the file's attributes
    statinfo = os.stat(filename)
    # Check that the file size is correct
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        print(statinfo.st_size)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename

filename = maybe_download('text8.zip', 31344016)

# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

# List of words
words = read_data(filename)
# Data size
print('Data size', len(words))
Output:
Found and verified text8.zip
Data size 17005207
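As a quick optional sanity check (not part of the original script), the raw token list can be inspected before the dictionary is built:

# Assumes `words` from the step above is still in memory
print(words[:8])                          # the first few tokens of the corpus
print('Unique words:', len(set(words)))   # raw vocabulary size before truncation to 50000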
Code:
# Step 2: Build the dictionary and replace rare words with UNK token.
# Build the dictionary
# Keep only the 50000 most frequent words; everything else is mapped to UNK
vocabulary_size = 50000

def build_dataset(words, vocabulary_size):
    count = [['UNK', -1]]
    # extend appends the elements of a list
    # Counter counts how many times each word occurs
    # most_common returns a top-N list; we keep 50000 words including UNK
    # c = Counter('abracadabra')
    # c.most_common()
    # [('a', 5), ('r', 2), ('b', 2), ('c', 1), ('d', 1)]
    # c.most_common(3)
    # [('a', 5), ('r', 2), ('b', 2)]
    # The 49999 most frequent words (plus UNK makes 50000)
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    # Build dictionary: word -> id (0-49999)
    # The more frequent the word, the smaller its id
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    # data: the corpus with every word replaced by its id
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    # Record how many words were mapped to UNK
    count[0][1] = unk_count
    # Reverse dictionary: id -> word
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary

# data                the corpus as a list of word ids
# count               the 50000 most frequent words (including UNK) with their counts
# dictionary          word -> id
# reverse_dictionary  id -> word
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
Output:
Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5234, 3081, 12, 6, 195, 2, 3134, 46, 59, 156] ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against']
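To make the mapping concrete, here is a small illustration (not part of the original script) that runs build_dataset on a hypothetical toy corpus; the ids assigned to the tied one-off words depend on Counter's tie-breaking, so the exact output may vary:

# Hypothetical toy corpus, only to illustrate the word -> id mapping
toy_words = ['the', 'quick', 'fox', 'the', 'lazy', 'dog', 'the']
toy_data, toy_count, toy_dict, toy_rev = build_dataset(toy_words, 4)
print(toy_count)  # 'the' appears 3 times, so it gets the smallest non-UNK id (1)
print(toy_data)   # the toy corpus rewritten as ids; words outside the top 3 collapse to 0 (UNK)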
Code:
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ] -> 3 when skip_window == 1
    # Double-ended queue acting as the sliding window
    buffer = collections.deque(maxlen=span)
    # The window slides over the data one word at a time:
    #   [ skip_window target skip_window ]
    #     [ skip_window target skip_window ]
    #       [ skip_window target skip_window ]
    #   [ 0 1 2 3 4 5 6 7 8 9 ... ]
    #       t   i          (t: target position, i: data_index)
    # Fill the window: loop span (here 3) times
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Build batch and labels
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        # Loop num_skips (here 2) times: each target word is paired with num_skips context words
        for j in range(num_skips):
            while target in targets_to_avoid:
                # The context word may be picked from either side of the target
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    # Step back span words, because data_index has moved span positions past the last window
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels

# Print sample data
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
Output:
3081 originated -> 5234 anarchism
3081 originated -> 12 as
12 as -> 3081 originated
12 as -> 6 a
6 a -> 195 term
6 a -> 12 as
195 term -> 2 of
195 term -> 6 a
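As an aside (not in the original tutorial), the same function can be probed with a wider window to see how skip_window and num_skips interact: with skip_window=2 and num_skips=4, every centre word is paired with four context words. The sketch below saves and restores the global data_index so the training run later is unaffected:

# Optional illustration: a wider context window
saved_index = data_index
data_index = 0
wide_batch, wide_labels = generate_batch(batch_size=8, num_skips=4, skip_window=2)
for i in range(8):
    print(wide_batch[i], reverse_dictionary[wide_batch[i]],
          '->', wide_labels[i, 0], reverse_dictionary[wide_labels[i, 0]])
data_index = saved_index  # restore the global cursor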
Code:
# Step 4: Build and train a skip-gram model.
batch_size = 128
# Dimension of the word vectors
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
# Sample 16 integers from 0-99 without replacement
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# Number of negative samples
num_sampled = 64    # Number of negative examples to sample.

graph = tf.Graph()
with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    # with tf.device('/cpu:0'):
    # Word embeddings
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    # embedding_lookup(params, ids) returns the rows of params indexed by ids.
    # For example, ids=[1,7,4] returns a tensor made of rows 1, 7 and 4 of params.
    # Look up the embeddings of the words in the current batch
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the noise-contrastive estimation (NCE) loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))

    # Construct the SGD optimizer using a learning rate of 1.0.
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    normalized_embeddings = embeddings / norm
    # Look up a few frequent words to check cosine similarity against
    valid_embeddings = tf.nn.embedding_lookup(
        normalized_embeddings, valid_dataset)
    # valid_size == 16
    # [16, 128] x [128, 50000] = [16, 50000]
    similarity = tf.matmul(
        valid_embeddings, normalized_embeddings, transpose_b=True)

    # Add variable initializer.
    init = tf.global_variables_initializer()
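The similarity op above is plain cosine similarity: every row of normalized_embeddings has unit length, so the dot product of two rows is the cosine of the angle between them. For reference, a NumPy sketch of the same computation on hypothetical stand-in arrays (not the graph variables) would be:

# Hypothetical stand-ins for the embedding matrix and the validation ids
emb = np.random.uniform(-1.0, 1.0, size=(50000, 128)).astype(np.float32)
valid_ids = np.array([0, 1, 2])

norm = np.sqrt(np.sum(np.square(emb), axis=1, keepdims=True))  # [50000, 1] row norms
normalized = emb / norm                                        # unit-length rows
valid_vecs = normalized[valid_ids]                             # [3, 128]
sim = valid_vecs @ normalized.T                                # [3, 50000] cosine similarities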
Code:
# Step 5: Begin training.
num_steps = 100001
final_embeddings = []
with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    print("Initialized")
    average_loss = 0
    for step in xrange(num_steps):
        # Get one batch of inputs and the corresponding labels, both as word ids
        batch_inputs, batch_labels = generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run())
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        # Report the average loss over the last 2000 steps
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0
        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 20000 == 0:
            sim = similarity.eval()
            # Find the words with the highest cosine similarity to each validation word
            for i in xrange(valid_size):
                # Look up the word for this id
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # Sort in descending order, skip the word itself, keep the top_k entries
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
    # The word vectors obtained after training
    final_embeddings = normalized_embeddings.eval()
Output:
Initialized
Average loss at step 0 : 302.383636475
Nearest to one: mitochondrial, succumbed, heligoland, ump, slowed, forges, exquisite, hardly,
Nearest to of: conformal, holbach, binitarianism, woodland, globe, coeur, artery, legacy,
Nearest to three: rooks, notorious, geelong, macrinus, airstrip, macy, fled, route,
Nearest to war: kadyrov, burbank, groin, geoffrey, machines, announcers, vascular, accidents,
Nearest to in: omari, scourge, esr, ethica, osman, sex, typhoid, panacea,
Nearest to are: ilium, ordain, reproductive, progesterone, calais, alphabetically, direct, porsche,
Nearest to were: moreau, biochemistry, innsbruck, slum, potencies, uncertainty, capacitive, autumn,
Nearest to the: singularities, differentials, hurriedly, kiribati, neverwinter, kabbalists, owe, jonathan,
Nearest to new: tamar, synthesized, midrashim, klux, tian, veronica, cervix, thermopylae,
Nearest to american: quarterly, psychopathic, motala, nudity, indentured, cooking, melancholia, groningen,
Nearest to they: dct, frye, theirs, athenaeus, obscura, andromache, rial, austere,
Nearest to its: shuffled, socratic, masterminded, post, among, painter, rifles, petty,
Nearest to seven: cautions, expounded, dm, doings, containing, afro, isle, cadre,
Nearest to which: risen, inevitable, toe, fis, geniuses, jim, dictatorship, insufficient,
Nearest to up: colliding, crushes, pritchard, mirza, communicate, soundhole, heir, jail,
Nearest to has: brenner, bulges, scandalous, doorman, hermeticism, escap, decrees, salvator,
Average loss at step 2000 : 113.390164988
Average loss at step 4000 : 52.6187440126
Average loss at step 6000 : 33.286626014
Average loss at step 8000 : 23.7020297694
Average loss at step 10000 : 17.7729295442
Average loss at step 12000 : 14.1117788121
Average loss at step 14000 : 11.771555624
Average loss at step 16000 : 9.96325901306
Average loss at step 18000 : 8.48758016729
Average loss at step 20000 : 8.13723055959
Nearest to one: two, operatorname, eight, six, nine, three, four, five,
Nearest to of: and, in, for, dasyprocta, with, between, nine, s,
Nearest to three: eight, four, two, nine, zero, seven, six, operatorname,
Nearest to war: vocals, feast, hundreds, machines, aoc, voluntarily, geoffrey, coimbra,
Nearest to in: and, of, on, for, at, from, with, by,
Nearest to are: were, is, was, ilium, zero, tiny, by, would,
Nearest to were: are, was, is, capable, and, transportation, arkham, modestly,
Nearest to the: a, dasyprocta, one, his, operatorname, this, their, circ,
Nearest to new: tamar, random, synthesized, veronica, tian, of, mathbf, readable,
Nearest to american: quarterly, s, bckgr, feminist, and, d, subsistence, helps,
Nearest to they: there, frye, he, anglicans, theirs, creating, elite, often,
Nearest to its: the, his, dasyprocta, eichmann, circ, a, en, ancestors,
Nearest to seven: nine, eight, four, zero, six, three, two, five,
Nearest to which: that, and, this, agincourt, tissue, the, dictatorship, toe,
Nearest to up: module, his, blacks, partners, alien, agouti, adding, austin,
Nearest to has: had, is, was, have, scandalous, decrees, kubitzki, marlon,
Average loss at step 22000 : 7.06678244722
Average loss at step 24000 : 6.85123083913
Average loss at step 26000 : 6.81250965095
Average loss at step 28000 : 6.34419031024
Average loss at step 30000 : 5.9245999701
Average loss at step 32000 : 5.93673320675
Average loss at step 34000 : 5.70171094501
Average loss at step 36000 : 5.74694340336
Average loss at step 38000 : 5.50405748427
Average loss at step 40000 : 5.25322429836
Nearest to one: two, eight, four, six, three, seven, zero, operatorname,
Nearest to of: zero, and, in, dasyprocta, for, agouti, recitative, eight,
Nearest to three: four, six, eight, five, seven, two, zero, one,
Nearest to war: feast, brass, machines, vocals, automobile, voluntarily, vascular, aoc,
Nearest to in: zero, and, at, on, dasyprocta, from, during, of,
Nearest to are: were, is, zero, was, progesterone, abet, have, calais,
Nearest to were: are, was, is, be, zero, have, had, by,
Nearest to the: its, dasyprocta, his, their, zero, agouti, operatorname, circ,
Nearest to new: tamar, veronica, synthesized, random, midrashim, ancestor, dasyprocta, elephant,
Nearest to american: and, quarterly, abakan, zero, indentured, bckgr, feminist, vma,
Nearest to they: there, he, it, we, not, deport, i, discard,
Nearest to its: the, their, his, dasyprocta, a, circ, some, zero,
Nearest to seven: six, eight, four, five, nine, zero, three, two,
Nearest to which: that, this, also, it, and, tissue, agincourt, one,
Nearest to up: module, mirza, recitative, enabling, partners, abandoning, cyanobacteria, mg,
Nearest to has: had, was, is, have, scandalous, amalthea, decrees, aba,
Average loss at step 42000 : 5.36403241181
Average loss at step 44000 : 5.27934718394
Average loss at step 46000 : 5.25050886309
Average loss at step 48000 : 5.24700605953
Average loss at step 50000 : 4.9966404134
Average loss at step 52000 : 5.03326895094
Average loss at step 54000 : 5.17822365785
Average loss at step 56000 : 5.04268380868
Average loss at step 58000 : 5.06483457124
Average loss at step 60000 : 4.93359541976
Nearest to one: two, four, three, six, five, eight, operatorname, seven,
Nearest to of: and, for, in, nine, dasyprocta, eight, including, six,
Nearest to three: four, five, two, six, eight, seven, operatorname, one,
Nearest to war: machines, boreal, feast, brass, automobile, rebellious, kadyrov, denigrating,
Nearest to in: from, during, at, dasyprocta, on, and, microsite, kapoor,
Nearest to are: were, is, have, was, zero, michelob, other, be,
Nearest to were: are, was, had, have, is, be, by, zero,
Nearest to the: its, dasyprocta, their, circ, recitative, his, a, operatorname,
Nearest to new: tamar, random, veronica, synthesized, member, xb, midrashim, tian,
Nearest to american: and, abakan, british, quarterly, abercrombie, indentured, feminist, bckgr,
Nearest to they: he, there, we, it, you, i, not, who,
Nearest to its: their, his, the, dasyprocta, bckgr, some, circ, dddddd,
Nearest to seven: eight, six, five, nine, four, three, zero, operatorname,
Nearest to which: this, that, also, it, but, ursus, one, wct,
Nearest to up: module, them, mirza, recitative, partners, enabling, cyanobacteria, abandoning,
Nearest to has: had, have, was, is, ursus, wct, decrees, amalthea,
Average loss at step 62000 : 4.99505268264
Average loss at step 64000 : 4.82697634709
Average loss at step 66000 : 4.59925288892
Average loss at step 68000 : 4.98079027224
Average loss at step 70000 : 4.89412822211
Average loss at step 72000 : 4.74675208092
Average loss at step 74000 : 4.80340922415
Average loss at step 76000 : 4.72690085912
Average loss at step 78000 : 4.79733606535
Average loss at step 80000 : 4.80540977299
Nearest to one: seven, six, two, five, four, operatorname, ursus, three,
Nearest to of: mico, in, dasyprocta, including, abet, kapoor, abakan, original,
Nearest to three: six, four, two, five, seven, eight, operatorname, lymphoma,
Nearest to war: machines, boreal, automobile, feast, kadyrov, brass, rebellious, geophysical,
Nearest to in: during, at, dasyprocta, from, ursus, and, of, under,
Nearest to are: were, is, have, be, was, michelob, britney, these,
Nearest to were: are, was, have, had, be, by, been, ursus,
Nearest to the: their, dasyprocta, a, kapoor, its, operatorname, iit, circ,
Nearest to new: tamar, random, member, veronica, synthesized, xb, dasyprocta, electrical,
Nearest to american: british, abakan, abercrombie, mico, indentured, bckgr, nunnery, indian,
Nearest to they: there, he, we, you, it, who, she, not,
Nearest to its: their, his, the, dasyprocta, dddddd, bckgr, her, iit,
Nearest to seven: six, eight, five, four, nine, three, one, two,
Nearest to which: that, this, also, but, it, ursus, what, and,
Nearest to up: filmfour, them, module, mirza, him, out, abandoning, recitative,
Nearest to has: had, have, was, is, ursus, decrees, wct, amalthea,
Average loss at step 82000 : 4.75804372787
Average loss at step 84000 : 4.75755859768
Average loss at step 86000 : 4.77840362
Average loss at step 88000 : 4.74728782678
Average loss at step 90000 : 4.73435067379
Average loss at step 92000 : 4.66841691899
Average loss at step 94000 : 4.72742706275
Average loss at step 96000 : 4.69911255908
Average loss at step 98000 : 4.60135727322
Average loss at step 100000 : 4.70121149051
Nearest to one: two, six, seven, five, four, operatorname, three, eight,
Nearest to of: mico, in, dasyprocta, including, cebus, globemaster, kapoor, same,
Nearest to three: four, five, six, seven, two, eight, operatorname, lymphoma,
Nearest to war: boreal, automobile, brass, feast, machines, kadyrov, hanna, cotswold,
Nearest to in: during, at, on, dasyprocta, microsite, from, under, within,
Nearest to are: were, is, have, these, be, michelob, britney, while,
Nearest to were: are, was, have, had, be, is, by, been,
Nearest to the: dasyprocta, their, a, its, agouti, kapoor, iit, this,
Nearest to new: tamar, random, veronica, member, synthesized, xb, mishnayot, dasyprocta,
Nearest to american: british, abakan, indian, abercrombie, mico, and, quarterly, bckgr,
Nearest to they: he, there, we, you, it, she, not, who,
Nearest to its: their, his, the, dasyprocta, her, elwes, bckgr, some,
Nearest to seven: eight, six, five, four, nine, three, zero, two,
Nearest to which: that, this, but, also, what, it, ursus, and,
Nearest to up: them, filmfour, out, him, module, abandoning, recitative, mirza,
Nearest to has: had, have, was, is, ursus, stationary, wct, globemaster,
Code:
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    # Set the figure size
    plt.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)

try:
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')  # on macOS use method='exact'
    # Plot 500 points
    plot_only = 500
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels)
except ImportError:
    print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
Output:
(t-SNE scatter plot of the 500 most frequent words, saved to tsne.png.)
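As an optional extra step (not in the original script), the trained vectors and vocabulary can be saved to disk so that the visualization and the nearest-neighbor query below can be rerun without retraining; a minimal sketch:

# Persist the trained vectors and the vocabulary (optional)
np.save('final_embeddings.npy', final_embeddings)
with open('vocab.txt', 'w') as f:
    for i in xrange(vocabulary_size):
        f.write(reverse_dictionary[i] + '\n')
# They can later be reloaded with:
# final_embeddings = np.load('final_embeddings.npy')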
Code:
with tf.Session() as session:
    valid_word = "one"
    valid_examples = dictionary[valid_word]
    valid_dataset = tf.constant([valid_examples], dtype=tf.int32)
    valid_embeddings = tf.nn.embedding_lookup(final_embeddings, valid_dataset)
    similarity = tf.matmul(valid_embeddings, final_embeddings, transpose_b=True)
    sim = similarity.eval()
    top_k = 8  # number of nearest neighbors
    # Sort by similarity in descending order and skip the word itself
    nearest = (-sim[0]).argsort()[1:top_k + 1]
    log_str = "Nearest to %s:" % valid_word
    for k in xrange(top_k):
        close_word = reverse_dictionary[nearest[k]]
        log_str = "%s %s," % (log_str, close_word)
    print(log_str)
Output:
Nearest to one: two, six, seven, five, four, operatorname, three, eight,
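Because final_embeddings is already a row-normalized NumPy array, the same query can be answered without a TensorFlow session at all; a minimal NumPy-only sketch using the dictionary and reverse_dictionary built earlier:

# NumPy-only nearest-neighbor query (final_embeddings rows are already unit length)
query = dictionary["one"]
sim = final_embeddings[query] @ final_embeddings.T   # cosine similarity to every word
nearest = (-sim).argsort()[1:9]                      # skip the word itself, keep top 8
print("Nearest to one:", ", ".join(reverse_dictionary[i] for i in nearest))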