Implementing Word2Vec with a Huffman tree and skip-gram in Python

If hierarchical softmax is not yet clear to you, first read http://124.222.190.191:8090/archives/word2vec-zhong-de-ha-fu-man-shu
and then come back to the code; it will be much easier to follow.

1. Dataset
Baidu Netdisk share: Baidu Netdisk (enter the extraction code)
Extraction code: 4yla

2. Code

import numpy as np
from collections import deque
from huffman_tree import HuffmanTree

2.1 Data preprocessing code

def _init_dict(min_count):
    """Build the vocabulary, keeping only words that occur at least min_count times."""
    input_file = open("./data/text8.txt", encoding="utf-8")
    word_count_sum = 0
    sentence_count = 0
    word2id_dict = dict()
    id2word_dict = dict()
    wordid_frequency_dict = dict()
    word_freq = dict()
    # First pass: count raw word frequencies.
    for line in input_file:
        line = line.strip().split()
        word_count_sum += len(line)
        sentence_count += 1
        for i, word in enumerate(line):
            if i % 1000000 == 0:
                print(i, len(line))
            if word_freq.get(word) is None:
                word_freq[word] = 1
            else:
                word_freq[word] += 1
    input_file.close()
    # Second pass: assign consecutive ids to words above the frequency threshold.
    for i, word in enumerate(word_freq):
        if i % 100000 == 0:
            print(i, len(word_freq))
        if word_freq[word] < min_count:
            word_count_sum -= word_freq[word]
            continue
        word2id_dict[word] = len(word2id_dict)
        id2word_dict[len(id2word_dict)] = word
        wordid_frequency_dict[len(word2id_dict) - 1] = word_freq[word]
    return word2id_dict, id2word_dict, wordid_frequency_dict

word2id_dict,id2word_dict,wordid_frequency_dict = _init_dict(20)
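
As a quick sanity check (the printed values depend on your corpus and are only illustrative), the three dictionaries should stay consistent with one another:

vocab_size = len(word2id_dict)
print(vocab_size)                              # vocabulary size after min_count filtering
some_word = next(iter(word2id_dict))
wid = word2id_dict[some_word]
assert id2word_dict[wid] == some_word          # id2word_dict is the inverse of word2id_dict
print(wordid_frequency_dict[wid])              # raw corpus frequency of that word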


def get_wordId_list():
    input_file = open("./data/text8.txt", encoding="utf-8")
    sentence = input_file.readline()  # text8 is a single long line
    wordId_list = []  # ids of every in-vocabulary word in the sentence
    sentence = sentence.strip().split(' ')
    for i, word in enumerate(sentence):
        if i % 1000000 == 0:
            print(i, len(sentence))
        try:
            word_id = word2id_dict[word]
            wordId_list.append(word_id)
        except KeyError:  # word was filtered out by min_count
            continue
    input_file.close()
    return wordId_list
wordId_list = get_wordId_list()
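
A quick look at the result (output values are corpus-dependent); every id in the list should map back to a word in the vocabulary:

print(len(wordId_list))      # number of in-vocabulary tokens kept from the corpus
print(wordId_list[:10])      # ids of the first few tokens
assert all(wid in id2word_dict for wid in wordId_list[:100])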

2.2 Sliding a context window over the sentence

def get_batch_pairs(batch_size, window_size, index, word_pairs_queue):
    # Refill the queue with (center, context) pairs until it can serve one mini-batch.
    while len(word_pairs_queue) < batch_size:
        for _ in range(1000):
            if index == len(wordId_list):
                index = 0
            for i in range(max(index - window_size, 0), min(index + window_size + 1, len(wordId_list))):
                wordId_w = wordId_list[index]
                wordId_v = wordId_list[i]
                if index == i:  # skip when the context position equals the center position
                    continue
                word_pairs_queue.append((wordId_w, wordId_v))
            index += 1
    result_pairs = []  # one mini-batch of positive (center, context) pairs
    for _ in range(batch_size):
        result_pairs.append(word_pairs_queue.popleft())
    return result_pairs, index  # also return index so the caller keeps sliding from where we stopped

index = 0
word_pairs_queue = deque()
result_pairs, index = get_batch_pairs(32, 3, index, word_pairs_queue)
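
To see what the pairing logic produces, here is the same window expansion applied by hand to a toy id sequence (window_size = 1, the ids are made up):

toy_ids = [10, 11, 12, 13]
window_size = 1
pairs = []
for idx in range(len(toy_ids)):
    for i in range(max(idx - window_size, 0), min(idx + window_size + 1, len(toy_ids))):
        if i == idx:
            continue
        pairs.append((toy_ids[idx], toy_ids[i]))
# pairs == [(10, 11), (11, 10), (11, 12), (12, 11), (12, 13), (13, 12)]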

2.3 Generating positive and negative samples with the Huffman tree

huffman_tree = HuffmanTree(wordid_frequency_dict)  # Huffman tree built from word frequencies
huffman_pos_path, huffman_neg_path = huffman_tree.get_all_pos_and_neg_path()
def get_pairs(pos_pairs):
    # Expand each (center, context) pair into (center, inner-node) pairs along the
    # context word's Huffman path: one list for positive-branch nodes, one for negative.
    neg_word_pair = []
    pos_word_pair = []
    for pair in pos_pairs:
        pos_word_pair += zip([pair[0]] * len(huffman_pos_path[pair[1]]), huffman_pos_path[pair[1]])
        neg_word_pair += zip([pair[0]] * len(huffman_neg_path[pair[1]]), huffman_neg_path[pair[1]])
    return pos_word_pair, neg_word_pair

pos_word_pair, neg_word_pair = get_pairs(result_pairs)

Result: two lists of (center word id, Huffman inner-node id) pairs, one for the positive branches and one for the negative branches.
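
The HuffmanTree class itself comes from the local huffman_tree module and is not shown in this post. Below is a minimal sketch of what such a class could look like, assuming the interface used above: the constructor takes the id-to-frequency dict, and get_all_pos_and_neg_path() returns, for every word id, the ids of the inner nodes on its root-to-leaf path, split by branch direction. Leaves reuse the word ids 0..V-1 and inner nodes get ids V..2V-2, which matches the 2*emb_size-1 embedding tables in the model below.

import heapq

class HuffmanNode:
    def __init__(self, wid, frequency):
        self.wid = wid          # node id: leaves are word ids, inner nodes get new ids
        self.frequency = frequency
        self.left = None
        self.right = None
        self.father = None
        self.code = None        # 0/1 branch taken from the father to reach this node

class HuffmanTree:
    def __init__(self, wordid_frequency_dict):
        self.word_count = len(wordid_frequency_dict)
        self.nodes = [HuffmanNode(wid, freq) for wid, freq in wordid_frequency_dict.items()]
        # Build the tree by repeatedly merging the two least-frequent nodes.
        heap = [(node.frequency, node.wid, node) for node in self.nodes]
        heapq.heapify(heap)
        next_id = self.word_count
        while len(heap) > 1:
            f1, _, n1 = heapq.heappop(heap)
            f2, _, n2 = heapq.heappop(heap)
            father = HuffmanNode(next_id, f1 + f2)
            next_id += 1
            father.left, father.right = n1, n2
            n1.father, n2.father = father, father
            n1.code, n2.code = 0, 1
            self.nodes.append(father)
            heapq.heappush(heap, (father.frequency, father.wid, father))
        self.root = heap[0][2]

    def get_all_pos_and_neg_path(self):
        # For every leaf word, walk up to the root and split the inner-node ids
        # along the path by the branch direction taken (here: right branch = positive).
        pos_paths, neg_paths = [], []
        for leaf in self.nodes[:self.word_count]:
            pos, neg = [], []
            node = leaf
            while node.father is not None:
                if node.code == 1:
                    pos.append(node.father.wid)
                else:
                    neg.append(node.father.wid)
                node = node.father
            pos_paths.append(pos)
            neg_paths.append(neg)
        return pos_paths, neg_paths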

2.4 Model

import torch
import torch.nn as nn
import torch.nn.functional as F

class SkipGramModel(nn.Module):
    def __init__(self, emb_size, emb_dimension):
        super(SkipGramModel, self).__init__()
        self.emb_size = emb_size
        self.emb_dimension = emb_dimension
        # A Huffman tree over emb_size leaves has 2*emb_size-1 nodes in total,
        # so both tables are sized to cover word ids and inner-node ids.
        self.w_embeddings = nn.Embedding(2 * emb_size - 1, emb_dimension, sparse=True)
        self.v_embeddings = nn.Embedding(2 * emb_size - 1, emb_dimension, sparse=True)
        self._init_emb()

    def _init_emb(self):
        initrange = 0.5 / self.emb_dimension
        self.w_embeddings.weight.data.uniform_(-initrange, initrange)
        self.v_embeddings.weight.data.zero_()  # inner-node vectors start at zero, as in the original word2vec

    def forward(self, pos_w, pos_v, neg_w, neg_v):
        emb_w = self.w_embeddings(torch.LongTensor(pos_w))      # center word vectors, [num_pos_pairs, emb_dimension]
        neg_emb_w = self.w_embeddings(torch.LongTensor(neg_w))  # center word vectors, [num_neg_pairs, emb_dimension]
        emb_v = self.v_embeddings(torch.LongTensor(pos_v))      # inner-node vectors on the positive branches
        neg_emb_v = self.v_embeddings(torch.LongTensor(neg_v))  # inner-node vectors on the negative branches
        score = torch.mul(emb_w, emb_v).squeeze()
        score = torch.sum(score, dim=1)
        score = torch.clamp(score, max=10, min=-10)
        score = F.logsigmoid(score)
        neg_score = torch.mul(neg_emb_w, neg_emb_v).squeeze()
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = torch.clamp(neg_score, max=10, min=-10)
        neg_score = F.logsigmoid(-neg_score)
        # L = -[ sum log sigmoid(x_w . theta_v) + sum log sigmoid(-x_w . theta_v) ]
        loss = -1 * (torch.sum(score) + torch.sum(neg_score))
        return loss

    def save_embedding(self, id2word, file_name):
        embedding = self.w_embeddings.weight.data.cpu().numpy()
        fout = open(file_name, 'w')
        fout.write('%d %d\n' % (len(id2word), self.emb_dimension))
        for wid, w in id2word.items():
            e = embedding[wid]
            e = ' '.join(map(lambda x: str(x), e))
            fout.write('%s %s\n' % (w, e))
        fout.close()

# Toy forward pass with a vocabulary of 100 fake words.
model = SkipGramModel(100, 10)
id2word = dict()
for i in range(100):
    id2word[i] = str(i)
pos_w = [0, 0, 1, 1, 1]
pos_v = [1, 2, 0, 2, 3]
neg_w = [0, 0, 1, 1, 1]
neg_v = [54, 55, 61, 71, 82]
loss = model(pos_w, pos_v, neg_w, neg_v)
print(loss.item())
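
Putting the pieces together, a minimal training loop could look like the sketch below. The hyperparameters, number of steps, and output file name are illustrative only, not taken from the original post:

import torch.optim as optim

emb_size = len(word2id_dict)
model = SkipGramModel(emb_size, 100)
optimizer = optim.SparseAdam(model.parameters(), lr=0.001)  # sparse embeddings need a sparse-aware optimizer

index = 0
word_pairs_queue = deque()
for step in range(10000):
    pos_pairs, index = get_batch_pairs(32, 3, index, word_pairs_queue)
    pos_word_pair, neg_word_pair = get_pairs(pos_pairs)
    pos_w = [pair[0] for pair in pos_word_pair]
    pos_v = [pair[1] for pair in pos_word_pair]
    neg_w = [pair[0] for pair in neg_word_pair]
    neg_v = [pair[1] for pair in neg_word_pair]

    optimizer.zero_grad()
    loss = model(pos_w, pos_v, neg_w, neg_v)
    loss.backward()
    optimizer.step()

    if step % 1000 == 0:
        print(step, loss.item())

model.save_embedding(id2word_dict, "word_embedding.txt")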

 
