I. Learning Objectives
II. Word Vectors
1. How Computers Represent Words
1.1 Discrete Representation: One-hot Encoding
1.2 Discrete Representation: Bag of Words
1.3 Discrete Representation: Bi-gram and N-gram
1.4 Problems with Discrete Representations
2. Word Encodings Should Preserve Word Similarity
2.1 Simple Word/Phrase Translation
2.2 Vector Space Substructure
2.3 Distributed Representations
3. Word2Vec: The Skip-Gram Model
3.1 The Skip-Gram Loss Function
3.2 Skip-Gram: Negative Sampling
4. Visualizing Word Embeddings
4.1 Visualizing Word Embeddings: Company-CEO
4.2 Visualizing Word Embeddings: Word Vectors
4.3 Visualizing Word Embeddings: Comparatives and Superlatives
4.4 Evaluating Word Embeddings: Word Analogy Task
4.5 Evaluating Word Embeddings: Word Similarity Task
4.6 Evaluating Word Embeddings: As Features for CRF-based Entity Recognition
III. Language Models
1. The Chain Rule
2. The Markov Assumption
3. Evaluating Language Models
4. Neural Language Models
5. Recurrent Neural Networks
6. Difficulties in Training RNNs
6.1 Vanishing and Exploding Gradients
6.2 The Long-Term Dependency Problem
7. Long Short-Term Memory Networks (LSTM)
7.1 RNN Memory Cells vs. LSTM Memory Cells
7.2 The Key to LSTM: the "Cell State"
7.3 How the LSTM Controls the "Cell State"
7.4 The LSTM's Cell-State Control Flow
7.5 Common LSTM Equations
IV. Hands-On Code
1. Implementing the DataLoader
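The snippets below rely on a handful of names defined earlier in the notebook. A minimal setup sketch follows; the concrete values are assumptions, but the names match how the later code uses them:

import numpy as np
import pandas as pd
import scipy.spatial
import scipy.stats
import sklearn.metrics.pairwise
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tud

C = 3                 # context window size: C words on each side of the center word (assumed value)
K = 100               # number of negative samples per positive word (assumed value)
VOCAB_SIZE = 30000    # vocabulary size; the last index is reserved for <UNK> (assumed value)
BATCH_SIZE = 128
EMBEDDING_SIZE = 100
NUM_EPOCHS = 2
LEARNING_RATE = 0.2
LOG_FILE = "word-embedding.log"
USE_CUDA = torch.cuda.is_available()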
class WordEmbeddingDataset(tud.Dataset):
    def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):
        ''' text: a list of words, all text from the training dataset
            word_to_idx: the dictionary mapping each word to its index
            idx_to_word: the list mapping each index back to its word
            word_freqs: the sampling weight of each word, used for negative sampling
            word_counts: the raw count of each word
        '''
        super(WordEmbeddingDataset, self).__init__()
        # Out-of-vocabulary words are mapped to the last index (reserved for <UNK>)
        self.text_encoded = [word_to_idx.get(t, VOCAB_SIZE - 1) for t in text]
        self.text_encoded = torch.Tensor(self.text_encoded).long()
        self.word_to_idx = word_to_idx
        self.idx_to_word = idx_to_word
        self.word_freqs = torch.Tensor(word_freqs)
        self.word_counts = torch.Tensor(word_counts)

    def __len__(self):
        ''' Return the length of the whole dataset (number of words). '''
        return len(self.text_encoded)

    def __getitem__(self, idx):
        ''' Return the following data for training:
            - the center word
            - the (positive) words in its context window
            - K randomly sampled negative words per positive word
        '''
        center_word = self.text_encoded[idx]
        # Indices of the C words on each side of the center word
        pos_indices = list(range(idx - C, idx)) + list(range(idx + 1, idx + C + 1))
        # Wrap around at the start and end of the corpus
        pos_indices = [i % len(self.text_encoded) for i in pos_indices]
        pos_words = self.text_encoded[pos_indices]
        # Draw K negatives per positive word, with probability proportional to word_freqs
        neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True)
        return center_word, pos_words, neg_words
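The constructor above takes several precomputed structures. A sketch of how they are typically built for this exercise (the corpus file name is an assumption; raising the frequencies to the 3/4 power is the negative-sampling smoothing from the word2vec paper):

from collections import Counter

with open("text8.train.txt") as fin:   # hypothetical corpus file
    text = fin.read().lower().split()

vocab = dict(Counter(text).most_common(VOCAB_SIZE - 1))
vocab["<unk>"] = len(text) - sum(vocab.values())   # everything else counts as <UNK>
idx_to_word = list(vocab.keys())
word_to_idx = {word: i for i, word in enumerate(idx_to_word)}
word_counts = np.array(list(vocab.values()), dtype=np.float32)
word_freqs = (word_counts / word_counts.sum()) ** (3. / 4.)
# torch.multinomial does not require normalized weights, so word_freqs can be used directly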
Create the dataset and dataloader:
dataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_freqs, word_counts)
dataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
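As a quick sanity check (under the setup sketched above), one batch should come out with the shapes the model expects:

center, pos, neg = next(iter(dataloader))
print(center.shape)  # torch.Size([BATCH_SIZE])
print(pos.shape)     # torch.Size([BATCH_SIZE, 2*C])
print(neg.shape)     # torch.Size([BATCH_SIZE, 2*C*K])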
2. Defining the PyTorch Model
class EmbeddingModel(nn.Module):
    def __init__(self, vocab_size, embed_size):
        ''' Initialize the input and output embeddings. '''
        super(EmbeddingModel, self).__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        initrange = 0.5 / self.embed_size
        self.out_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)
        self.out_embed.weight.data.uniform_(-initrange, initrange)
        self.in_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)
        self.in_embed.weight.data.uniform_(-initrange, initrange)

    def forward(self, input_labels, pos_labels, neg_labels):
        '''
        input_labels: center words, [batch_size]
        pos_labels: words that appear in the context window around the center word, [batch_size, window_size * 2]
        neg_labels: words that do not appear around the center word, drawn by negative sampling, [batch_size, window_size * 2 * K]
        return: loss, [batch_size]
        '''
        input_embedding = self.in_embed(input_labels)    # B * embed_size
        pos_embedding = self.out_embed(pos_labels)       # B * (2*C) * embed_size
        neg_embedding = self.out_embed(neg_labels)       # B * (2*C*K) * embed_size
        # Dot product of the center word with each positive / negative word
        log_pos = torch.bmm(pos_embedding, input_embedding.unsqueeze(2)).squeeze(2)   # B * (2*C)
        log_neg = torch.bmm(neg_embedding, -input_embedding.unsqueeze(2)).squeeze(2)  # B * (2*C*K)
        log_pos = F.logsigmoid(log_pos).sum(1)
        log_neg = F.logsigmoid(log_neg).sum(1)  # [batch_size]
        loss = log_pos + log_neg
        return -loss

    def input_embeddings(self):
        ''' Return the input embedding matrix as a numpy array. '''
        return self.in_embed.weight.data.cpu().numpy()
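For reference, the forward pass above computes the skip-gram negative-sampling objective of Mikolov et al. (2013). For a center word $c$ with input embedding $v_c$, context words $o$ with output embeddings $u_o$, and $2CK$ sampled negative words $w_k$:

$$ L(c) = -\sum_{o \in \mathrm{ctx}(c)} \log \sigma\left(u_o^{\top} v_c\right) - \sum_{k=1}^{2CK} \log \sigma\left(-u_{w_k}^{\top} v_c\right) $$

where $\sigma$ is the logistic sigmoid; this is exactly -(log_pos + log_neg) as returned by forward.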
Define the model and move it to the GPU:
model = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)
if USE_CUDA:
    model = model.cuda()
Next come the code for evaluating the model and the code for training it:
def evaluate(filename, embedding_weights):
    if filename.endswith(".csv"):
        data = pd.read_csv(filename, sep=",")
    else:
        data = pd.read_csv(filename, sep="\t")
    human_similarity = []
    model_similarity = []
    for i in data.iloc[:, 0:2].index:
        word1, word2 = data.iloc[i, 0], data.iloc[i, 1]
        # Skip pairs containing out-of-vocabulary words
        if word1 not in word_to_idx or word2 not in word_to_idx:
            continue
        else:
            word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]
            word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]
            model_similarity.append(float(sklearn.metrics.pairwise.cosine_similarity(word1_embed, word2_embed)))
            human_similarity.append(float(data.iloc[i, 2]))
    # Spearman rank correlation between the model's and the human similarity scores
    return scipy.stats.spearmanr(human_similarity, model_similarity)

def find_nearest(word):
    index = word_to_idx[word]
    embedding = embedding_weights[index]
    # Cosine distance from the query word to every word in the vocabulary
    cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
    return [idx_to_word[i] for i in cos_dis.argsort()[:10]]
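find_nearest loops over the whole vocabulary in Python, which is slow for repeated queries. An equivalent vectorized sketch (same globals assumed) computes all cosine similarities in one matrix product:

def find_nearest_vectorized(word, k=10):
    query = embedding_weights[word_to_idx[word]]
    # Cosine similarity of every row of embedding_weights with the query vector
    sims = embedding_weights @ query
    sims /= np.linalg.norm(embedding_weights, axis=1) * np.linalg.norm(query) + 1e-8
    return [idx_to_word[i] for i in np.argsort(-sims)[:k]]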
Training the model:
The model is trained for several epochs; each iteration follows the steps below, implemented in the code that follows:
① In each epoch, split all the data into batches.
② Wrap each batch's inputs and outputs as CUDA tensors.
③ Forward pass: score each center word against its context (positive) words and its sampled negative words.
④ Compute the negative-sampling loss from these scores.
⑤ Zero the model's current gradients.
⑥ Backward pass.
⑦ Update the model parameters.
⑧ Every fixed number of iterations, log the loss at the current iteration and evaluate the model on the validation datasets.
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
for e in range(NUM_EPOCHS):
    for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
        input_labels = input_labels.long()
        pos_labels = pos_labels.long()
        neg_labels = neg_labels.long()
        if USE_CUDA:
            input_labels = input_labels.cuda()
            pos_labels = pos_labels.cuda()
            neg_labels = neg_labels.cuda()

        optimizer.zero_grad()
        loss = model(input_labels, pos_labels, neg_labels).mean()
        loss.backward()
        optimizer.step()

        if i % 100 == 0:
            with open(LOG_FILE, "a") as fout:
                fout.write("epoch: {}, iter: {}, loss: {}\n".format(e, i, loss.item()))
            print("epoch: {}, iter: {}, loss: {}".format(e, i, loss.item()))

        if i % 2000 == 0:
            embedding_weights = model.input_embeddings()
            sim_simlex = evaluate("simlex-999.txt", embedding_weights)
            sim_men = evaluate("men.txt", embedding_weights)
            sim_353 = evaluate("wordsim353.csv", embedding_weights)
            with open(LOG_FILE, "a") as fout:
                print("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
                fout.write("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))

embedding_weights = model.input_embeddings()
np.save("embedding-{}".format(EMBEDDING_SIZE), embedding_weights)
torch.save(model.state_dict(), "embedding-{}.th".format(EMBEDDING_SIZE))
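The artifacts saved above can be reloaded later without retraining (note that np.save appends the .npy suffix):

embedding_weights = np.load("embedding-{}.npy".format(EMBEDDING_SIZE))
model.load_state_dict(torch.load("embedding-{}.th".format(EMBEDDING_SIZE)))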
A portion of the training log is shown below:
epoch: 0, iter: 0, loss: 420.04736328125
epoch: 0, iteration: 0, simlex-999: SpearmanrResult(correlation=0.002806243285464091, pvalue=0.9309107582703205), men: SpearmanrResult(correlation=-0.03578915454199749, pvalue=0.06854012381329619), sim353: SpearmanrResult(correlation=0.02468906830123471, pvalue=0.6609497549092586), nearest to monster: ['monster', 'communism', 'bosses', 'microprocessors', 'infectious', 'debussy', 'unesco', 'tantamount', 'offices', 'tischendorf']
epoch: 0, iter: 100, loss: 278.9967041015625
epoch: 0, iter: 200, loss: 248.71990966796875
epoch: 0, iter: 300, loss: 202.95816040039062
epoch: 0, iter: 400, loss: 157.04776000976562
epoch: 0, iter: 500, loss: 137.83531188964844
epoch: 0, iter: 600, loss: 121.03585815429688
epoch: 0, iter: 700, loss: 105.300537109375
epoch: 0, iter: 800, loss: 114.10055541992188
epoch: 0, iter: 900, loss: 104.72723388671875
epoch: 0, iter: 1000, loss: 99.03569030761719
epoch: 0, iter: 1100, loss: 95.2179946899414
epoch: 0, iter: 1200, loss: 84.12557983398438
epoch: 0, iter: 1300, loss: 88.07209777832031
epoch: 0, iter: 1400, loss: 70.44454193115234
epoch: 0, iter: 1500, loss: 79.83641052246094
epoch: 0, iter: 1600, loss: 81.7451171875
epoch: 0, iter: 1700, loss: 75.91305541992188
epoch: 0, iter: 1800, loss: 65.86140441894531
epoch: 0, iter: 1900, loss: 69.81714630126953
epoch: 0, iter: 2000, loss: 71.05166625976562
epoch: 0, iteration: 2000, simlex-999: SpearmanrResult(correlation=-0.011490367338787073, pvalue=0.7225847577400916), men: SpearmanrResult(correlation=0.05671509287050605, pvalue=0.0038790264864563434), sim353: SpearmanrResult(correlation=-0.07381419228558825, pvalue=0.18921537418718104), nearest to monster: ['monster', 'harm', 'steel', 'dean', 'kansas', 'surgery', 'regardless', 'capitalism', 'offers', 'hockey']
3. Evaluating on the SimLex-999, MEN, and WordSim-353 Datasets
embedding_weights = model.input_embeddings()
print("simlex-999", evaluate("simlex-999.txt", embedding_weights))
print("men", evaluate("men.txt", embedding_weights))
print("wordsim353", evaluate("wordsim353.csv", embedding_weights))
The results are as follows:
simlex-999 SpearmanrResult(correlation=0.17251697429101504, pvalue=7.863946056740345e-08)
men SpearmanrResult(correlation=0.1778096817088841, pvalue=7.565661657312768e-20)
wordsim353 SpearmanrResult(correlation=0.27153702278146635, pvalue=8.842165885381714e-07)
4. Finding Nearest Neighbors
for word in ["good", "fresh", "monster", "green", "like", "america", "chicago", "work", "computer", "language"]:
    print(word, find_nearest(word))
The results are as follows:
good ['good', 'bad', 'perfect', 'hard', 'questions', 'alone', 'money', 'false', 'truth', 'experience']
fresh ['fresh', 'grain', 'waste', 'cooling', 'lighter', 'dense', 'mild', 'sized', 'warm', 'steel']
monster ['monster', 'giant', 'robot', 'hammer', 'clown', 'bull', 'demon', 'triangle', 'storyline', 'slogan']
green ['green', 'blue', 'yellow', 'white', 'cross', 'orange', 'black', 'red', 'mountain', 'gold']
like ['like', 'unlike', 'etc', 'whereas', 'animals', 'soft', 'amongst', 'similarly', 'bear', 'drink']
america ['america', 'africa', 'korea', 'india', 'australia', 'turkey', 'pakistan', 'mexico', 'argentina', 'carolina']
chicago ['chicago', 'boston', 'illinois', 'texas', 'london', 'indiana', 'massachusetts', 'florida', 'berkeley', 'michigan']
work ['work', 'writing', 'job', 'marx', 'solo', 'label', 'recording', 'nietzsche', 'appearance', 'stage']
computer ['computer', 'digital', 'electronic', 'audio', 'video', 'graphics', 'hardware', 'software', 'computers', 'program']
language ['language', 'languages', 'alphabet', 'arabic', 'grammar', 'pronunciation', 'dialect', 'programming', 'chinese', 'spelling']
5. Relationships Between Words
man_idx = word_to_idx["man"]
king_idx = word_to_idx["king"]
woman_idx = word_to_idx["woman"]
# The classic analogy: king - man + woman should land near "queen"
embedding = embedding_weights[woman_idx] - embedding_weights[man_idx] + embedding_weights[king_idx]
cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
for i in cos_dis.argsort()[:20]:
    print(idx_to_word[i])
The results are as follows:
king
henry
charles
pope
queen
iii
prince
elizabeth
alexander
constantine
edward
son
iv
louis
emperor
mary
james
joseph
frederick
francis
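The king - man + woman lookup above generalizes to a small helper, using the same globals: a is to b as c is to ?

def analogy(a, b, c, k=10):
    ''' Return the k words whose vectors are closest to emb(b) - emb(a) + emb(c). '''
    vec = embedding_weights[word_to_idx[b]] - embedding_weights[word_to_idx[a]] \
          + embedding_weights[word_to_idx[c]]
    cos_dis = np.array([scipy.spatial.distance.cosine(e, vec) for e in embedding_weights])
    return [idx_to_word[i] for i in cos_dis.argsort()[:k]]

print(analogy("man", "king", "woman"))  # reproduces the list above; "queen" ranks near the top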