Features from the previous time step influence the next time step.
CNN - vision
RNN - natural language
Plain RNN - keeps every past feature, so the error grows large
LSTM - forgets selectively
Natural Language Processing - Word Vector Models - Word2Vec
The goal is to construct word vectors.
First, consider the first question: how can text be turned into vectors? It sounds abstract, so let's start by looking at it from a human perspective.
Input: the first word, the second word
Output: the third word, i.e. the word with the highest predicted probability
The red box (the window) slides to the right with a stride of 1, producing one training pair per position, as in the sketch below.
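A minimal sketch of this pair construction (the example sentence and variable names are illustrative assumptions, not from the original notes):

sentence = "the quick brown fox jumps".split()  # hypothetical example sentence
pairs = []
for i in range(len(sentence) - 2):  # the window slides right one word at a time
    # input: two consecutive words; output: the word that follows them
    pairs.append(((sentence[i], sentence[i + 1]), sentence[i + 2]))
# pairs == [(('the', 'quick'), 'brown'), (('quick', 'brown'), 'fox'), (('brown', 'fox'), 'jumps')]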
CBOW: the inputs are the preceding and following context words; the output is the middle word.
Skip-gram: the input is the middle word; the outputs are the preceding and following context words.
The two perform about equally well in practice.
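A minimal sketch of the two pairing schemes (the sentence, window size, and names here are illustrative assumptions):

words = "I like natural language processing".split()
window = 1   # one context word on each side
center = 2   # index of the middle word, 'natural'

# CBOW: context words in, center word out
cbow_pair = ([words[center - window], words[center + window]], words[center])
# (['like', 'language'], 'natural')

# Skip-gram: center word in, context words out
skipgram_pairs = [(words[center], words[center - window]),
                  (words[center], words[center + window])]
# [('natural', 'like'), ('natural', 'language')]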
Backpropagation
For a CNN, it is enough to take the derivative with respect to w.
For an RNN, partial derivatives must be taken with respect to both w and x, and propagated back through time.
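As a minimal illustration (assuming a vanilla RNN cell in the scalar case, which the original notes do not spell out): with h_t = tanh(a_t) and a_t = w * h_{t-1} + u * x_t, the chain rule gives

∂h_t/∂w = tanh'(a_t) * (h_{t-1} + w * ∂h_{t-1}/∂w)

so the gradient with respect to w recursively involves every earlier time step (backpropagation through time), whereas a convolution weight's gradient depends only on the current input patch.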
If the corpus is even moderately large, the number of possible outputs is enormous; the final layer is effectively a softmax over the whole vocabulary, which is very expensive to compute. Is there a way around this?
Initial idea: feed in two words and predict whether they actually occur as an (input, output) pair, turning the task into binary classification.
The motivation is sound, but a training set built this way has labels that are all 1, so the model cannot learn anything useful.
Improvement: add some negative samples (the negative sampling model).
The recommended number of negative samples is 5.
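A minimal sketch of how the training set changes with negative sampling (the toy vocabulary and pair are illustrative assumptions):

import random

vocab = ['the', 'of', 'cat', 'sat', 'mat']  # toy vocabulary
positive = ('cat', 'sat')                   # a real (input, output) pair from the corpus
num_neg = 5                                 # the recommended number of negatives

samples = [(positive[0], positive[1], 1)]   # label 1: a true neighbor pair
while len(samples) < 1 + num_neg:
    noise = random.choice(vocab)            # in practice, sampled by scaled word frequency
    if noise != positive[1]:                # do not sample the true output word
        samples.append((positive[0], noise, 0))  # label 0: not a real neighbor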
Example
import collections
import os
import random
import urllib
import zipfile
import numpy as np
import tensorflow as tf
# Training parameters
learning_rate = 0.1   # learning rate
batch_size = 128
num_steps = 3000000   # number of training iterations; modest here because the corpus is small
display_step = 10000  # print the current loss every this many steps
eval_step = 200000    # run the evaluation every this many steps
# Test examples
eval_words = ['nine', 'of', 'going', 'hardware', 'american', 'britain']
# Word2Vec parameters
embedding_size = 200        # dimensionality of the word vectors (tunable)
max_vocabulary_size = 50000 # maximum number of distinct words kept in the vocabulary
min_occurrence = 10         # minimum word frequency; rarer words are dropped
skip_window = 3             # context window size on each side
num_skips = 2               # how many (input, output) pairs to generate per window
num_sampled = 64            # number of negative samples
# Load the training data; any text corpus will do
data_path = 'text8.zip'
with zipfile.ZipFile(data_path) as f:
    text_words = f.read(f.namelist()[0]).lower().split()
len(text_words)  # total number of words
17005207
Frequency filtering
collections.Counter tallies how many times each word appears.
# Build a counter that tallies the occurrences of every word
count = [('UNK', -1)]  # UNK: unknown
# Keep the max_vocabulary_size most frequent words (50000, as set above)
count.extend(collections.Counter(text_words).most_common(max_vocabulary_size - 1))
most_common already returns the words sorted by descending frequency.
count[0:10]
[('UNK', -1),
 (b'the', 1061396),
 (b'of', 593677),
 (b'and', 416629),
 (b'one', 411764),
 (b'in', 372201),
 (b'a', 325873),
 (b'to', 316376),
 (b'zero', 264975),
 (b'nine', 250430)]
Minimum-frequency filtering
Remember that we also set the min_occurrence parameter, so every word must be checked against it.
# Remove words that occur fewer than 'min_occurrence' times
for i in range(len(count) - 1, -1, -1):  # iterate backwards from the end, step -1
    if count[i][1] < min_occurrence:
        count.pop(i)
    else:
        # count is sorted by descending frequency, so once one word passes
        # the threshold, every word before it does too
        break
Word-to-ID mapping
Each word is assigned a unique ID.
# Vocabulary size after filtering
vocabulary_size = len(count)
# Assign an ID to every word
word2id = dict()
for i, (word, _) in enumerate(count):
    word2id[word] = i
word2id
Convert every word in the corpus to its ID
data = list()
unk_count = 0
for word in text_words:
    # Convert each word to its ID
    index = word2id.get(word, 0)  # 0 stands for unknown
    if index == 0:
        unk_count += 1
    data.append(index)
count[0] = ('UNK', unk_count)
id2word = dict(zip(word2id.values(), word2id.keys()))  # the inverse of word2id
print("Words count:", len(text_words))
print("Unique words:", len(set(text_words)))  # number of distinct words
print("Vocabulary size:", vocabulary_size)    # less than 50000 once low-frequency words are removed
print("Most common words:", count[:10])
Building the required training data
data_index = 0

def next_batch(batch_size, num_skips, skip_window):  # sliding-window sampling
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # Window size: skip_window words on each side plus the center word
    span = 2 * skip_window + 1  # a window of 7: 3 left, 3 right, 1 center
    buffer = collections.deque(maxlen=span)  # a queue of length 7
    if data_index + span > len(data):  # wrap around once the data has been consumed
        data_index = 0
    buffer.extend(data[data_index:data_index + span])  # the queue holds the current window of word IDs, e.g. deque([5234, 3081, 12, 6, 195, 2, 3134], maxlen=7)
    data_index += span
    for i in range(batch_size // num_skips):  # num_skips = how many context words to emit per window (2 here); // is integer division
        context_words = [w for w in range(span) if w != skip_window]  # context positions, i.e. [0, 1, 2, 4, 5, 6] when span == 7
        words_to_use = random.sample(context_words, num_skips)  # randomly pick 2 context positions
        for j, context_word in enumerate(words_to_use):  # each chosen context word becomes a label
            batch[i * num_skips + j] = buffer[skip_window]  # the input is always the center word (position 3)
            labels[i * num_skips + j, 0] = buffer[context_word]  # the label is the chosen context word
        if data_index == len(data):
            buffer.extend(data[0:span])
            data_index = span
        else:
            buffer.append(data[data_index])  # slide the window one step right, e.g. [5234, 3081, 12, 6, 195, 2, 3134] -> [3081, 12, 6, 195, 2, 3134, 46]
            data_index += 1
    # Back up a little so no words are skipped at the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
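A quick sanity check of next_batch (illustrative; the exact IDs printed depend on the corpus and the random sampling):

demo_batch, demo_labels = next_batch(batch_size=8, num_skips=2, skip_window=3)
for center_id, context_id in zip(demo_batch, demo_labels[:, 0]):
    # each pair is: center word -> one sampled context word
    print(id2word[center_id], '->', id2word[context_id])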
with tf.device('/cpu:0'):  # embedding: maps each word ID to a vector
    embedding = tf.Variable(tf.random.normal([vocabulary_size, embedding_size]))  # shape (47135, 200) in this run, randomly initialized
    nce_weights = tf.Variable(tf.random.normal([vocabulary_size, embedding_size]))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
tf.nn.embedding_lookup converts word indices into their embedding vectors:
def get_embedding(x):
    with tf.device('/cpu:0'):
        x_embed = tf.nn.embedding_lookup(embedding, x)  # look up the vector for each index
        return x_embed
Defining the loss function
First compute the outputs and labels for the positive samples and for the sampled negative samples,
then compute the loss between outputs and labels with sigmoid cross entropy.
def nce_loss(x_embed, y):
    with tf.device('/cpu:0'):
        y = tf.cast(y, tf.int64)
        loss = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_weights,
                           biases=nce_biases,
                           labels=y,
                           inputs=x_embed,
                           num_sampled=num_sampled,  # how many negative samples to draw
                           num_classes=vocabulary_size))
        return loss
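For intuition, what tf.nn.nce_loss computes can be approximated by hand roughly as follows. This is a simplified sketch, not the library's exact implementation: real NCE draws negatives from a log-uniform distribution and adds sampling-correction terms, while this version samples uniformly.

def nce_loss_manual(x_embed, y, num_sampled):
    # Positive logits: score of each true output word against its input
    true_ids = tf.reshape(tf.cast(y, tf.int64), [-1])
    true_w = tf.nn.embedding_lookup(nce_weights, true_ids)              # (batch, emb)
    true_b = tf.nn.embedding_lookup(nce_biases, true_ids)               # (batch,)
    true_logits = tf.reduce_sum(x_embed * true_w, axis=1) + true_b      # targets: 1

    # Negative logits: scores of uniformly sampled noise words
    neg_ids = tf.random.uniform([num_sampled], maxval=vocabulary_size, dtype=tf.int64)
    neg_w = tf.nn.embedding_lookup(nce_weights, neg_ids)                # (num_sampled, emb)
    neg_b = tf.nn.embedding_lookup(nce_biases, neg_ids)                 # (num_sampled,)
    neg_logits = tf.matmul(x_embed, neg_w, transpose_b=True) + neg_b    # targets: 0

    # Sigmoid cross entropy: positives pushed toward 1, negatives toward 0
    pos_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(true_logits), logits=true_logits)
    neg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(neg_logits), logits=neg_logits)
    return tf.reduce_mean(pos_loss) + tf.reduce_mean(neg_loss)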
Evaluation module
# Evaluation: find the most similar embedding vectors
def evaluate(x_embed):
    with tf.device('/cpu:0'):
        # Compute the cosine similarity between the input embedding(s) and every embedding vector
        x_embed = tf.cast(x_embed, tf.float32)  # the trained vector(s) of the query word(s)
        x_embed_norm = x_embed / tf.sqrt(tf.reduce_sum(tf.square(x_embed)))  # normalize the query
        embedding_norm = embedding / tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keepdims=True))  # normalize every vector
        cosine_sim_op = tf.matmul(x_embed_norm, embedding_norm, transpose_b=True)  # cosine similarity
        return cosine_sim_op
# SGD optimizer
optimizer = tf.optimizers.SGD(learning_rate)

# One optimization step
def run_optimization(x, y):
    with tf.device('/cpu:0'):
        # Record the forward pass so gradients can be computed
        with tf.GradientTape() as g:
            emb = get_embedding(x)
            loss = nce_loss(emb, y)
        # Compute gradients
        gradients = g.gradient(loss, [embedding, nce_weights, nce_biases])
        # Apply the updates
        optimizer.apply_gradients(zip(gradients, [embedding, nce_weights, nce_biases]))
# The words to test (the vocabulary keys are bytes, hence the encode)
x_test = np.array([word2id[w.encode('utf-8')] for w in eval_words])
# Training loop
for step in range(1, num_steps + 1):
    batch_x, batch_y = next_batch(batch_size, num_skips, skip_window)
    run_optimization(batch_x, batch_y)
    if step % display_step == 0 or step == 1:
        loss = nce_loss(get_embedding(batch_x), batch_y)
        print("step: %i, loss: %f" % (step, loss))
    # Evaluation
    if step % eval_step == 0 or step == 1:
        print("Evaluation...")
        sim = evaluate(get_embedding(x_test)).numpy()
        for i in range(len(eval_words)):
            top_k = 8  # show the 8 nearest neighbors
            nearest = (-sim[i, :]).argsort()[1:top_k + 1]  # skip index 0, which is the word itself
            log_str = '"%s" nearest neighbors:' % eval_words[i]
            for k in range(top_k):
                log_str = '%s %s,' % (log_str, id2word[nearest[k]])
            print(log_str)