1. Preparing the training data
(1) Reading the text data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tud
from torch.nn.parameter import Parameter
from collections import Counter
import numpy as np
import random
import math
import pandas as pd
import scipy.stats
import scipy.spatial.distance
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
USE_CUDA = torch.cuda.is_available()

# To make the results reproducible, we fix every random seed to a constant value.
random.seed(53113)
np.random.seed(53113)
torch.manual_seed(53113)
if USE_CUDA:
    torch.cuda.manual_seed(53113)

# Hyperparameters
K = 100                  # number of negative samples per positive word
C = 3                    # context window: number of nearby words on each side
NUM_EPOCHS = 2           # number of epochs of training
MAX_VOCAB_SIZE = 30000   # vocabulary size
BATCH_SIZE = 128         # batch size
LEARNING_RATE = 0.2      # initial learning rate
EMBEDDING_SIZE = 100     # embedding dimension
LOG_FILE = 'word-embedding.log'

# Tokenizer: split a piece of text into individual words.
def word_tokenize(text):
    return text.split()

# Read all the text from the file and build a vocabulary from it.
# Since the number of distinct words may be very large, we keep only the
# MAX_VOCAB_SIZE most common words and add an UNK token that stands for all
# the uncommon ones. We also record the word-to-index and index-to-word
# mappings, the word counts, the (normalized) word frequencies, and the
# total number of words.
with open('text8.train.txt', 'r') as fin:
    # Read the whole file into a single string.
    text = fin.read()
text = [w for w in word_tokenize(text.lower())]

# Build the vocabulary from the MAX_VOCAB_SIZE - 1 most common words;
# the last slot is reserved for uncommon or unseen words.
vocab = dict(Counter(text).most_common(MAX_VOCAB_SIZE - 1))
# Everything not in the vocabulary is counted as <unk>.
vocab['<unk>'] = len(text) - np.sum(list(vocab.values()))

# Index-to-word list (the dictionary keys) and the inverse word-to-index
# mapping, e.g. {'the': 0, 'of': 1, 'i': 2, ...}.
idx_to_word = [word for word in vocab.keys()]
word_to_idx = {word: i for i, word in enumerate(idx_to_word)}

word_counts = np.array([count for count in vocab.values()], dtype=np.float32)
word_freqs = word_counts / np.sum(word_counts)
# Raise the unigram frequencies to the 3/4 power and renormalize; this is the
# noise distribution used for negative sampling in the word2vec paper.
word_freqs = word_freqs ** (3. / 4.)
word_freqs = word_freqs / np.sum(word_freqs)

VOCAB_SIZE = len(idx_to_word)
print(VOCAB_SIZE)
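To see what the 3/4 power buys us, here is a toy example with made-up frequencies: the smoothing shifts probability mass from frequent words to rare ones, so rare words are drawn as negative samples more often than their raw frequency would suggest.

# Toy example (made-up numbers): the 3/4 power flattens the distribution.
freqs = np.array([0.9, 0.1], dtype=np.float32)
smoothed = freqs ** (3. / 4.)
smoothed = smoothed / smoothed.sum()
print(smoothed)  # roughly [0.84, 0.16]: the rare word's probability rises from 0.10 to ~0.16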
(2) Implementing the DataLoader
# Implementing the DataLoader.
# A dataloader for skip-gram training needs to:
#   - encode all the text as integers (optionally preprocessing it with subsampling);
#   - keep the vocabulary, the word counts, and the normalized word frequencies;
#   - sample a center word at each iteration;
#   - return the context words around the current center word;
#   - sample some negative words for each center word.
# With a dataloader we can easily shuffle the whole dataset, fetch a batch of data, and so on.
class WordEmbeddingDataset(tud.Dataset):
    def __init__(self, text, word_to_idx, idx_to_word, word_freqs, word_counts):
        ''' text: a list of words, all text from the training dataset
            word_to_idx: the dictionary from word to idx
            idx_to_word: idx to word mapping
            word_freqs: the frequency of each word
            word_counts: the word counts
        '''
        super(WordEmbeddingDataset, self).__init__()
        # Replace every word in the text with its vocabulary index;
        # dict.get() maps out-of-vocabulary words to the <unk> index.
        self.text_encoded = [word_to_idx.get(t, VOCAB_SIZE - 1) for t in text]
        # Convert the Python list to a LongTensor.
        self.text_encoded = torch.tensor(self.text_encoded, dtype=torch.long)
        self.word_to_idx = word_to_idx
        self.idx_to_word = idx_to_word
        # Word frequencies and counts as tensors.
        self.word_freqs = torch.tensor(word_freqs)
        self.word_counts = torch.tensor(word_counts)

    def __len__(self):
        ''' Return the length of the whole dataset (the number of words). '''
        return len(self.text_encoded)

    def __getitem__(self, idx):
        ''' This function returns the following data for training:
            - the center word
            - the (positive) words near the center word
            - K randomly sampled words per positive word as negative samples
        '''
        # The center word at position idx in the text, e.g. 'love' -> 4.
        center_word = self.text_encoded[idx]
        # The positive samples are the words within C positions of the center
        # word, so there are 2 * C of them (here C = 3).
        pos_indices = list(range(idx - C, idx)) + list(range(idx + 1, idx + C + 1))
        # If the center word is near the beginning or end of the text, wrap the
        # out-of-range indices around, e.g. -1 % 10 == 9.
        pos_indices = [i % len(self.text_encoded) for i in pos_indices]
        # Vocabulary indices of the positive words.
        pos_words = self.text_encoded[pos_indices]
        # Negative sampling: draw K * (number of positive words) vocabulary
        # indices, with replacement, weighted by word_freqs.
        neg_words = torch.multinomial(self.word_freqs, K * pos_words.shape[0], True)
        # Return the indices of the center word, the positive samples, and the negative samples.
        return center_word, pos_words, neg_words
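The comments above mention subsampling frequent words, but the class as written never applies it. For reference, here is a minimal sketch of the subsampling step from the word2vec paper; the threshold value and where to apply it are assumptions, not part of the original code.

# Sketch of frequent-word subsampling, for reference only; the dataset below
# is still built from the full text. In the word2vec paper a word w is
# discarded with probability P(w) = 1 - sqrt(t / f(w)), where f(w) is its
# normalized frequency; t = 1e-5 here is an assumed threshold.
t = 1e-5
raw_freqs = word_counts / word_counts.sum()          # unsmoothed unigram frequencies
keep_prob = np.minimum(1.0, np.sqrt(t / raw_freqs))  # probability of keeping each word
subsampled_text = [w for w in text
                   if random.random() < keep_prob[word_to_idx.get(w, VOCAB_SIZE - 1)]]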
dataset = WordEmbeddingDataset(text, word_to_idx, idx_to_word, word_freqs, word_counts)
# num_workers: number of worker processes; 0 means loading in the main process.
dataloader = tud.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

# Inspect a few batches to check the dataloader output.
for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
    print("------------------", i, "-----------------------")
    print(input_labels)
    print(pos_labels)
    print(neg_labels)
    if i > 2:  # only look at the first few batches
        break
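With the hyperparameters above, each batch should have the following shapes; a quick sanity check:

# Sanity check on batch shapes (C = 3, K = 100, BATCH_SIZE = 128).
input_labels, pos_labels, neg_labels = next(iter(dataloader))
print(input_labels.shape)  # torch.Size([128])        -> one center word per example
print(pos_labels.shape)    # torch.Size([128, 6])     -> 2 * C positive words each
print(neg_labels.shape)    # torch.Size([128, 600])   -> K * 2 * C negative words each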
3. Defining the evaluation function
def evaluate(filename, embedding_weights):
    if filename.endswith(".csv"):
        data = pd.read_csv(filename, sep=',')
    else:
        data = pd.read_csv(filename, sep='\t')
    human_similarity = []
    model_similarity = []
    for i in data.iloc[:, 0:2].index:
        word1, word2 = data.iloc[i, 0], data.iloc[i, 1]
        if word1 not in word_to_idx or word2 not in word_to_idx:
            continue
        else:
            word1_idx, word2_idx = word_to_idx[word1], word_to_idx[word2]
            word1_embed, word2_embed = embedding_weights[[word1_idx]], embedding_weights[[word2_idx]]
            # Cosine similarity between the two word vectors.
            model_similarity.append(float(sklearn.metrics.pairwise.cosine_similarity(word1_embed, word2_embed)))
            human_similarity.append(float(data.iloc[i, 2]))
    # Spearman's rank correlation measures the statistical dependence between
    # two variables: how well their relationship can be described by a
    # monotonic function.
    return scipy.stats.spearmanr(human_similarity, model_similarity)
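The model definition (section 2 of the outline) is missing from this excerpt, but the training loop below expects an `EmbeddingModel` whose forward pass returns the per-example negative-sampling loss and which exposes an `input_embeddings()` method. The following is a minimal sketch consistent with that usage, assuming separate input/output embedding tables and the standard log-sigmoid negative-sampling loss; the original post's definition may differ.

class EmbeddingModel(nn.Module):
    def __init__(self, vocab_size, embed_size):
        super(EmbeddingModel, self).__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        # Separate input (center-word) and output (context-word) embedding tables.
        self.in_embed = nn.Embedding(vocab_size, embed_size)
        self.out_embed = nn.Embedding(vocab_size, embed_size)

    def forward(self, input_labels, pos_labels, neg_labels):
        ''' input_labels: center words, [batch_size]
            pos_labels: positive context words, [batch_size, 2 * C]
            neg_labels: negative samples, [batch_size, 2 * C * K]
            Returns the per-example negative-sampling loss, [batch_size].
        '''
        input_embedding = self.in_embed(input_labels)   # [batch, embed_size]
        pos_embedding = self.out_embed(pos_labels)      # [batch, 2C, embed_size]
        neg_embedding = self.out_embed(neg_labels)      # [batch, 2C * K, embed_size]

        input_embedding = input_embedding.unsqueeze(2)  # [batch, embed_size, 1]
        # Dot products of the center word with positive and (negated) negative words.
        pos_dot = torch.bmm(pos_embedding, input_embedding).squeeze(2)   # [batch, 2C]
        neg_dot = torch.bmm(neg_embedding, -input_embedding).squeeze(2)  # [batch, 2C * K]

        log_pos = F.logsigmoid(pos_dot).sum(1)
        log_neg = F.logsigmoid(neg_dot).sum(1)
        return -(log_pos + log_neg)  # negative log-likelihood to minimize

    def input_embeddings(self):
        # The input embedding table as a numpy array, used for evaluation.
        return self.in_embed.weight.data.cpu().numpy()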
4. Defining the optimizer
model = EmbeddingModel(VOCAB_SIZE, EMBEDDING_SIZE)
if USE_CUDA:
    model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE)
5. Training the model
# Training loop.
for e in range(NUM_EPOCHS):
    for i, (input_labels, pos_labels, neg_labels) in enumerate(dataloader):
        input_labels = input_labels.long()
        pos_labels = pos_labels.long()
        neg_labels = neg_labels.long()
        if USE_CUDA:
            input_labels = input_labels.cuda()
            pos_labels = pos_labels.cuda()
            neg_labels = neg_labels.cuda()

        optimizer.zero_grad()
        loss = model(input_labels, pos_labels, neg_labels).mean()
        loss.backward()
        optimizer.step()

        if i % 100 == 0:
            with open(LOG_FILE, "a") as fout:
                fout.write("epoch:{},iter:{},loss:{}\n".format(e, i, loss.item()))
            print("epoch:{},iter:{},loss:{}".format(e, i, loss.item()))

        if i % 2000 == 0:
            # Note: find_nearest() is defined in section 6 below; it must be
            # defined before this loop is run.
            embedding_weights = model.input_embeddings()
            sim_simlex = evaluate("simlex-999.txt", embedding_weights)
            sim_men = evaluate("men.txt", embedding_weights)
            sim_353 = evaluate("wordsim353.csv", embedding_weights)
            with open(LOG_FILE, "a") as fout:
                print("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))
                fout.write("epoch: {}, iteration: {}, simlex-999: {}, men: {}, sim353: {}, nearest to monster: {}\n".format(
                    e, i, sim_simlex, sim_men, sim_353, find_nearest("monster")))

    embedding_weights = model.input_embeddings()
    np.save("embedding-{}".format(EMBEDDING_SIZE), embedding_weights)
    torch.save(model.state_dict(), "embedding-{}.th".format(EMBEDDING_SIZE))
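To reuse the results later, both files can be loaded back; a quick sketch, noting that np.save appends a .npy suffix automatically:

# Reload the saved embeddings and model weights.
embedding_weights = np.load("embedding-{}.npy".format(EMBEDDING_SIZE))
model.load_state_dict(torch.load("embedding-{}.th".format(EMBEDDING_SIZE)))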
6. Evaluating accuracy
def find_nearest(word):
    index = word_to_idx[word]
    embedding = embedding_weights[index]
    # Cosine distance between this word's vector and every other word vector.
    cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
    # The 10 smallest distances correspond to the 10 nearest words.
    return [idx_to_word[i] for i in cos_dis.argsort()[:10]]

for word in ["good", "fresh", "monster", "green", "like", "america", "chicago", "work", "computer", "language"]:
    print(word, find_nearest(word))
# Word analogy: vector("king") - vector("man") + vector("woman") should be
# close to vector("queen").
man_idx = word_to_idx["man"]
king_idx = word_to_idx["king"]
woman_idx = word_to_idx["woman"]
embedding = embedding_weights[woman_idx] - embedding_weights[man_idx] + embedding_weights[king_idx]
cos_dis = np.array([scipy.spatial.distance.cosine(e, embedding) for e in embedding_weights])
for i in cos_dis.argsort()[:20]:
    print(idx_to_word[i])