import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tud
from collections import Counter
import numpy as np
import random
import math
import pandas as pd
import scipy.stats
import scipy.spatial.distance
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
USE_CUDA = torch.cuda.is_available()
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
if USE_CUDA:
torch.cuda.manual_seed(1)
C = 3  # context window size: number of words taken on each side of the center word
K = 100  # number of negative samples drawn per positive word
NUM_EPOCHS = 2
MAX_VOCAB_SIZE = 30000
BATCH_SIZE = 128
LEARNING_RATE = 0.2
EMBEDDING_SIZE = 100
LOG_FILE = "word-embedding.log"
def word_tokenize(text):  # split the text into word tokens, e.g. "I love you" -> ["I", "love", "you"]
    return text.split()
S = './data/'
path = S + "text8.train.txt"
with open(path,"r") as fin:
text = fin.read()
text = word_tokenize(text)
vocab = dict(Counter(text).most_common(MAX_VOCAB_SIZE - 1))  # build the vocabulary; the last slot is reserved for rare or unseen words
vocab['<unk>'] = len(text) - np.sum(list(vocab.values()))  # all rare or unseen words are counted under <unk>, the last entry of the vocabulary
idx_to_word = [word for word in vocab.keys()]  # index -> word list
word_to_idx = {word: i for i, word in enumerate(idx_to_word)}  # word -> index, e.g. {the: 0, of: 1, I: 2, love: 3, he: 4, you: 5}
words_counts = np.array([count for count in vocab.values()], dtype=np.float32)  # raw count of each vocabulary word
word_frequency = words_counts / np.sum(words_counts)  # unigram frequency of each word
word_frequency = word_frequency ** (3. / 4.)  # raise to the 3/4 power (as in word2vec) to upweight rare words for negative sampling
word_frequency = word_frequency / np.sum(word_frequency)  # renormalize so the sampling distribution sums to 1
VOCAB_SIZE = len(word_frequency)
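# A minimal illustration (not required for training; the toy counts below are made up for this
# demonstration): raising frequencies to the 3/4 power flattens the unigram distribution, so
# rare words are drawn as negative samples more often than their raw frequency would allow.
_toy_counts = np.array([100., 10., 1.])
_toy_raw = _toy_counts / _toy_counts.sum()           # ~[0.90, 0.09, 0.01]
_toy_smoothed = _toy_raw ** (3. / 4.)
_toy_smoothed = _toy_smoothed / _toy_smoothed.sum()  # ~[0.83, 0.15, 0.03]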
class WordEmbeddingDataset(tud.Dataset):
    def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):
        ''' text: a list of words, all text from the training dataset
            word_to_idx: the dictionary from word to idx
            idx_to_word: idx to word mapping
            word_freqs: the (3/4-smoothed) frequency of each word, used for negative sampling
            word_counts: the word counts
        '''
        super(WordEmbeddingDataset, self).__init__()
        self.text_encoded = [word_to_idx.get(t, VOCAB_SIZE - 1) for t in text]  # replace every word in the text with its vocabulary index; unknown words map to the last index (<unk>)
        # e.g. with vocabulary {the: 0, of: 1, I: 2, love: 3, he: 4, you: 5}, the text "I love you" becomes [2, 3, 5]
        self.text_encoded = torch.LongTensor(self.text_encoded)  # convert the Python list to a LongTensor
        self.word_to_idx = word_to_idx  # keep the vocabulary mappings
        self.idx_to_word = idx_to_word
        self.word_freqs = torch.Tensor(word_freqs)  # sampling weight of each vocabulary word, as a tensor
        # e.g. if the counts are {the: 2, of: 3, I: 3, love: 1, he: 2, you: 2}, the total is 2 + 3 + 3 + 1 + 2 + 2 = 13,
        # so the raw frequencies are {the: 2/13, of: 3/13, I: 3/13, love: 1/13, he: 2/13, you: 2/13}
        self.word_counts = torch.Tensor(word_counts)  # raw count of each vocabulary word, as a tensor
    def __len__(self):
        ''' return the length of the whole dataset (number of word positions)
        '''
        return len(self.text_encoded)
    def __getitem__(self, idx):
        ''' this function returns the following data for training:
            - the center word
            - the (positive) words near the center word
            - K randomly sampled words per positive word as negative samples
        '''
        center_word = self.text_encoded[idx]  # index of the center word, e.g. "love" -> 3 with vocabulary {the: 0, of: 1, I: 2, love: 3, he: 4, you: 5}
        pos_indices = list(range(idx - C, idx)) + list(range(idx + 1, idx + C + 1))  # positions of the context (positive) words; with window C there are 2*C of them
        pos_indices = [i % len(self.text_encoded) for i in pos_indices]  # if the center word is at the start or end of the text, wrap the window around, e.g. -1 % 10 == 9
        pos_words = self.text_encoded[pos_indices]  # vocabulary indices of the positive words around the center word
        neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True)  # sample K negative words per positive word from the smoothed unigram distribution
        return center_word, pos_words, neg_words  # return the indices of the center word, positive samples and negative samples
dataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_frequency, words_counts)
dataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)  # num_workers: number of worker processes; 0 loads data in the main process
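# Quick sanity check (illustrative only, using the defaults above): each batch yields center
# words of shape [BATCH_SIZE], positive words [BATCH_SIZE, 2*C] and negative words
# [BATCH_SIZE, 2*C*K], i.e. [128], [128, 6] and [128, 600] here.
for _c, _p, _n in dataloader:
    print(_c.shape, _p.shape, _n.shape)
    break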
class EmbeddingModel(nn.Module):
    def __init__(self, vocab_size, embed_size):
        ''' initialize the input and output embeddings
        '''
        super(EmbeddingModel, self).__init__()
        self.vocab_size = vocab_size  # vocabulary size, 30000 here
        self.embed_size = embed_size  # embedding dimension, typically 50, 100 or 300
        initrange = 0.5 / self.embed_size
        self.out_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)  # output ("context") embedding matrix, vocab_size x embed_size
        self.out_embed.weight.data.uniform_(-initrange, initrange)  # initialize the weights uniformly in [-0.5/embed_size, 0.5/embed_size]
        self.in_embed = nn.Embedding(self.vocab_size, self.embed_size, sparse=False)  # input ("center") embedding matrix, vocab_size x embed_size
        self.in_embed.weight.data.uniform_(-initrange, initrange)  # initialize the weights uniformly in [-0.5/embed_size, 0.5/embed_size]
    def forward(self, input_labels, pos_labels, neg_labels):
        '''
        input_labels: center words, [batch_size]
        pos_labels: words that appear in the context window of the center word, [batch_size, (window_size * 2)]
        neg_labels: words that do not appear around the center word, obtained by negative sampling, [batch_size, (window_size * 2 * K)]
        return: loss, [batch_size]
        '''
        batch_size = input_labels.size(0)
        input_embedding = self.in_embed(input_labels)  # B * embed_size
        pos_embedding = self.out_embed(pos_labels)  # B * (2*C) * embed_size
        neg_embedding = self.out_embed(neg_labels)  # B * (2*C*K) * embed_size
        log_pos = torch.bmm(pos_embedding, input_embedding.unsqueeze(2)).squeeze(2)  # B * (2*C)
        log_neg = torch.bmm(neg_embedding, -input_embedding.unsqueeze(2)).squeeze(2)  # B * (2*C*K)
        log_pos = F.logsigmoid(log_pos).sum(1)
        log_neg = F.logsigmoid(log_neg).sum(1)  # batch_size
loss = log_pos + log_neg
return -loss
def input_embeddings(self):
return self.in_embed.weight.data.cpu().numpy()
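# A minimal shape check of forward() (illustrative only; the tiny model and the random indices
# below are made up for this demonstration and are independent of the model trained later):
_demo = EmbeddingModel(vocab_size=10, embed_size=4)
_demo_loss = _demo(torch.randint(0, 10, (2,)),            # 2 center words
                   torch.randint(0, 10, (2, 2 * C)),      # 2*C positives per center word
                   torch.randint(0, 10, (2, 2 * C * K)))  # 2*C*K negatives per center word
assert _demo_loss.shape == (2,)  # forward() returns one loss value per example in the batch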
def evaluate(filename, embedding_weights):
if filename.endswith(".csv"):
data = pd.read_csv(filename, sep=",")
else:
data = pd.read_csv(filename, sep="\t")
human_similarity = []
model_similarity = []
for i in data.iloc[:, 0:2].index:
word1, word2 = data.iloc[i, 0], data.iloc[i, 1]
if word1 not in word_to_idx or word2 not in word_to_idx:
continue
else:
word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]
word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]
            model_similarity.append(float(cosine_similarity(word1_embed, word2_embed)))
human_similarity.append(float(data.iloc[i, 2]))
    return scipy.stats.spearmanr(human_similarity, model_similarity)
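# Illustration of the metric (toy numbers, made up for this example): Spearman's rho compares
# rankings, so any monotonically increasing relationship yields a correlation of 1.0.
_demo_rho, _demo_p = scipy.stats.spearmanr([1, 2, 3], [0.1, 0.5, 0.9])  # _demo_rho == 1.0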
def find_nearest(word):
    # uses the module-level `embedding_weights` array produced from the trained model below
    index = word_to_idx[word]
    embedding = embedding_weights[index]
    cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
    return [idx_to_word[i] for i in cos_dis.argsort()[:10]]  # the 10 closest words (the query word itself comes first)
model = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)
if USE_CUDA:
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
for e in range(NUM_EPOCHS):
for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
input_labels = input_labels.long()
pos_labels = pos_labels.long()
neg_labels = neg_labels.long()
if USE_CUDA:
input_labels = input_labels.cuda()
pos_labels = pos_labels.cuda()
neg_labels = neg_labels.cuda()
optimizer.zero_grad()
loss = model(input_labels, pos_labels, neg_labels).mean()
loss.backward()
optimizer.step()
if i % 1000 == 0:
with open(LOG_FILE, "a") as fout:
fout.write("epoch: {}, iter: {}, loss: {}\n".format(e, i, loss.item()))
print("epoch: {}, iter: {}, loss: {}".format(e, i, loss.item()))
if i % 2000 == 0:
embedding_weights = model.input_embeddings()
            sim_simlex = evaluate(S + "simlex-999.txt", embedding_weights)
            sim_men = evaluate(S + "men.txt", embedding_weights)
            sim_353 = evaluate(S + "wordsim353.csv", embedding_weights)
with open(LOG_FILE, "a") as fout:
print("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
fout.write(
"epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
embedding_weights = model.input_embeddings()
np.save("embedding-{}".format(EMBEDDING_SIZE), embedding_weights)
torch.save(model.state_dict(), "embedding-{}.th".format(EMBEDDING_SIZE))
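# A minimal sketch (assuming the same VOCAB_SIZE / EMBEDDING_SIZE as above) of reloading the
# saved checkpoint in a later session, e.g. to inspect nearest neighbours without retraining:
# model2 = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)
# model2.load_state_dict(torch.load("embedding-{}.th".format(EMBEDDING_SIZE)))
# embedding_weights = model2.input_embeddings()
# print(find_nearest("monster"))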
# The loss function used is the skip-gram negative-sampling objective computed above:
#     log_pos = F.logsigmoid(log_pos).sum(1)
#     log_neg = F.logsigmoid(log_neg).sum(1)  # batch_size
#     loss = log_pos + log_neg
# i.e. for each center word c:
#     loss = -( sum_{o in context} log sigma(u_o . v_c) + sum_{k in negatives} log sigma(-u_k . v_c) )