Learning Through Questions 9: Hands-On word2vec Code

Code

Data processing

The data-processing pipeline is: load the raw corpus, tokenize and clean it, build the vocabulary and the word/id mappings, convert the corpus to an id sequence, subsample very frequent words, build (context, target) pairs with a sliding window, attach negative samples, and finally wrap everything in a Dataset/DataLoader.

import torch
import torch.nn as nn
import io
import os
import sys
import requests
from collections import OrderedDict
import math
import random
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
from time import sleep
# # Download the corpus used to train word2vec
# def download():
#     # Some open datasets can be downloaded from Baidu's cloud servers (dataset.bj.bcebos.com)
#     text_url = "https://dataset.bj.bcebos.com/word2vec/text8.txt"
#     # Use the requests package to download the dataset
#     web_request = requests.get(text_url)
#     text = web_request.content
#     # Save the downloaded bytes to text8.txt in the current directory
#     with open("./text8.txt", "wb") as f:
#         f.write(text)

# Read the whole corpus from disk
def load_text(filepath):
    with open(filepath, 'r') as f:
        corpus = f.read()  # .strip("\n")
    return corpus

# Tokenize the corpus: strip leading/trailing whitespace, split on spaces, and optionally lowercase
def word_preprocess(corpus):
    # With no argument, strip() removes all leading and trailing whitespace characters
    # (spaces, tabs \t, newlines \n, carriage returns \r, form feeds \f and vertical tabs \v);
    # it only touches the ends of the string, never characters in the middle.
    # With an argument, only the specified characters are removed.
    corpus = corpus.strip()  # .lower()
    # split() cuts the string on the given separator; if the optional count num is given,
    # the string is split into at most num+1 pieces
    corpus = corpus.split(" ")
    return corpus
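
# A quick sanity check (hypothetical toy input, not from the original post):
# word_preprocess("the quick brown fox\n") -> ["the", "quick", "brown", "fox"]
# strip() removes the trailing newline, split(" ") tokenizes on single spaces.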

# After tokenization, gather corpus statistics and assign each word an id.
# Ids are assigned by frequency: the more frequent a word, the smaller its id,
# which makes the vocabulary easier to manage.
# Build the vocabulary: count each word's frequency and map each word to an integer id
def word_freq2id(corpus):
    # First count how often each distinct word occurs, using a dict
    word_freq_dict = dict()
    for word in corpus:
        # If the word is not in the dict yet, create its entry with count 0 ...
        if word not in word_freq_dict:
            word_freq_dict[word] = 0
        # ... then increment its count
        word_freq_dict[word] += 1

    # Sort the words by count, descending: the more frequent a word, the earlier it appears
    # and the smaller the id it will receive.
    # High-frequency words tend to be function words such as "I", "the", "you",
    # while low-frequency words tend to be content words such as "nlp".
    word_freq_dict = sorted(word_freq_dict.items(), key=lambda x: x[1], reverse=True)
    # Build three dictionaries:
    #   word2id_dict: word -> id
    #   word2id_freq: id -> frequency
    #   id2word_dict: id -> word
    word2id_dict = dict()
    word2id_freq = dict()
    id2word_dict = dict()

    # Walk through the words from most to least frequent and give each one a unique id
    for word, freq in word_freq_dict:
        # Use the current size of word2id_dict as the new id; because of the sort above,
        # more frequent words get smaller ids
        curr_id = len(word2id_dict)
        word2id_dict[word] = curr_id
        word2id_freq[word2id_dict[word]] = freq
        id2word_dict[curr_id] = word
    return word2id_dict, word2id_freq, id2word_dict
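
# A worked toy example (assumed input, for illustration only):
# corpus = ["the", "cat", "sat", "the"]
# word_freq2id(corpus) returns
#   word2id_dict = {"the": 0, "cat": 1, "sat": 2}   # "the" occurs most often -> id 0
#   word2id_freq = {0: 2, 1: 1, 2: 1}
#   id2word_dict = {0: "the", 1: "cat", 2: "sat"}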

# Convert the corpus into a sequence of ids
def corpus2id(corpus, word2id_dict):
    # Replace every word in the corpus with its id so the network can process it
    corpus = [word2id_dict[word] for word in corpus]
    return corpus
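
# Continuing the toy example above (assumed input):
# corpus2id(["the", "cat", "sat", "the"], {"the": 0, "cat": 1, "sat": 2}) -> [0, 1, 2, 0]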

# Subsample the corpus to strengthen training: very frequent words are randomly dropped
def subsampling(corpus, word2id_freq):
    # discard() decides whether a given word occurrence is thrown away; it is random, so each call differs.
    # The more frequent a word, the more likely it is to be discarded:
    # P(discard) = 1 - sqrt(t / f(w)), with t = 1e-4 and f(w) = count(w) / len(corpus)
    def discard(word_id):
        return random.uniform(0, 1) < 1 - math.sqrt(
            1e-4 / word2id_freq[word_id] * len(corpus))

    corpus = [word for word in corpus if not discard(word)]
    return corpus
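
# A rough numeric check of the discard rule (hypothetical numbers):
# with t = 1e-4 and a word whose count/len(corpus) ratio is 1%,
# P(keep) = sqrt(1e-4 / 0.01) = 0.1, so that word is dropped about 90% of the time,
# while a word covering only 0.01% of the corpus is always kept (sqrt(1e-4 / 1e-4) = 1).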

def create_context_target(corpus, window_size):
    # Python slicing x[a:b]: a negative index on the left counts from the end,
    # a negative index on the right excludes the last n elements.
    # So targets excludes the first and last window_size tokens of the corpus,
    # guaranteeing that every target has a full window of context on both sides.
    targets = corpus[window_size:-window_size]
    contexts = []
    total = len(corpus) - window_size - window_size
    # tqdm wraps any (non-while) loop with a progress bar so long runs are easier to follow
    # Iterate over every target and collect the words within window_size on each side as its context
    for idx in tqdm(range(window_size, len(corpus) - window_size), total=total, leave=False):
        context = []
        for t in range(-window_size, window_size + 1):
            if t == 0:  # this is the target itself, skip it
                continue
            context.append(corpus[idx + t])
        # each target gets one context list
        contexts.append(context)
    return contexts, targets
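
# A worked toy example (assumed ids, for illustration only):
# corpus = [0, 1, 2, 3, 4, 5, 6], window_size = 2
# targets  = [2, 3, 4]
# contexts = [[0, 1, 3, 4], [1, 2, 4, 5], [2, 3, 5, 6]]
# i.e. each target keeps the window_size words on each side as its context.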

class NegativeSampler:
    def __init__(self, word2id_dict, word2id_freq, id2word_dict, neg_num, power=0.75):
        self.word2id_dict = word2id_dict
        self.word2id_freq = word2id_freq
        self.id2word_dict = id2word_dict
        self.neg_num = neg_num
        # Compute the sum of all word frequencies
        total_freq = 0
        for word_id, freq in word2id_freq.items():
            # Raise each frequency to the given power (0.75, as in the original word2vec paper);
            # note that word2id_freq is modified in place here
            new_freq = math.pow(freq, power)
            word2id_freq[word_id] = new_freq
            total_freq += new_freq
        # Vocabulary size
        self.vocab_size = len(word2id_freq)
        # Probability of each word being drawn as a negative sample,
        # i.e. its (powered) frequency divided by the total
        self.neg_word_prob = np.zeros(self.vocab_size)
        for word_id, freq in word2id_freq.items():
            self.neg_word_prob[word_id] = freq / total_freq

    def negative_sample(self, target):
        # Number of center words to sample negatives for
        target_size = len(target)
        # The result is a target_size x neg_num array of negative word ids
        negative_sample = np.zeros((target_size, self.neg_num), dtype=np.int32)
        for i in range(target_size):
            # copy() makes a shallow copy: the array itself is duplicated, nested objects are not
            p = self.neg_word_prob.copy()
            target_idx = target[i]
            # The center word itself must never be chosen as a negative sample, so zero its probability
            p[target_idx] = 0
            p /= p.sum()
            # Example from the NumPy docs: draw a non-uniform random sample of size 3
            # from np.arange(5) without replacement:
            # >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
            # array([2, 3, 0])
            negative_sample[i, :] = np.random.choice(self.vocab_size, size=self.neg_num, replace=False, p=p)
        return negative_sample
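
# Usage sketch (shapes only; the drawn ids are random by construction):
# sampler = NegativeSampler(word2id_dict, word2id_freq, id2word_dict, neg_num=3)
# sampler.negative_sample([target_id]) -> np.int32 array of shape (1, 3),
# drawn from the unigram distribution raised to the 0.75 power, never equal to target_id.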

# Custom dataset: subclass Dataset and override its three methods
class CBOWDataset(Dataset):
    # class-level flag so the very first sample is printed once for inspection
    a = 0
    def __init__(self, contexts, targets, negative_sampler):
        self.contexts = contexts
        self.targets = targets
        self.negative_sampler = negative_sampler
    def __len__(self):
        return len(self.contexts)
    def __getitem__(self, idx):
        # For sample idx, return:
        #   contexts (2*window_size ids), the CBOW model input,
        #   targets  (one positive center word plus neg_num sampled negatives),
        #   labels   (aligned with targets: 1 for the positive center word, 0 for each negative)
        contexts = self.contexts[idx]
        targets = [self.targets[idx]]
        # Draw neg_num negative samples for this center word
        negative_samples = self.negative_sampler.negative_sample(targets)
        # targets now plays the role of the output candidates: the positive center word
        # followed by neg_num randomly drawn negatives
        targets += [x for x in negative_samples[0]]
        labels = [1] + [0 for _ in range(len(negative_samples[0]))]

        item = {
            "contexts": contexts,
            "targets": targets,
            "labels": labels
        }

        if self.a == 0:
            print("item:")
            print(item)
            self.a = 1
        return item
    def generate_batch(self, item_list):
        contexts = [x["contexts"] for x in item_list]
        targets = [x["targets"] for x in item_list]
        labels = [x["labels"] for x in item_list]

        outputs = {
            "contexts": torch.LongTensor(contexts),
            "targets": torch.LongTensor(targets),
            "labels": torch.LongTensor(labels),
        }

        return outputs
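
# Shape summary for one CBOW batch produced by generate_batch (batch size B):
#   contexts: (B, 2 * window_size)   LongTensor of context word ids (model input)
#   targets:  (B, 1 + neg_num)       positive center word followed by neg_num negatives
#   labels:   (B, 1 + neg_num)       1 for the positive target, 0 for each negative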

class SkipGramDataset(Dataset):
    def __init__(self, contexts, centers, negative_sampler):
        self.contexts = contexts
        self.centers = centers
        self.negative_sampler = negative_sampler

    def __len__(self):
        return len(self.contexts)

    def __getitem__(self, idx):
        # The key difference from CBOW: the center word is the input, while the context
        # words plus the negative samples are the outputs
        center = self.centers[idx]
        context = self.contexts[idx]
        negative_samples = self.negative_sampler.negative_sample(context)
        # reshape(-1) flattens the array to one dimension; tolist() turns it into a Python list
        negative_samples = negative_samples.reshape(-1).tolist()
        label = [1] * len(context) + [0] * len(negative_samples)
        context_negative_samples = context + negative_samples

        item = {
            "center": center,
            "context": context_negative_samples,
            "label": label,
        }
        return item

    def generate_batch(self, item_list):
        center_ids = [x["center"] for x in item_list]
        context_ids = [x["context"] for x in item_list]
        labels = [x["label"] for x in item_list]

        outputs = {
            "center_ids": torch.LongTensor(center_ids),
            "context_ids": torch.LongTensor(context_ids),
            "labels": torch.LongTensor(labels),
        }

        return outputs
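
# Shape summary for one skip-gram batch produced by generate_batch (batch size B):
#   center_ids:  (B,)                                center word ids (model input)
#   context_ids: (B, 2*window_size*(1 + neg_num))    real context words followed by their negatives
#   labels:      (B, 2*window_size*(1 + neg_num))    1 for real context words, 0 for negatives
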
def test():
    import os
    import sys
    from torch.utils.data import DataLoader
    os.chdir(sys.path[0])

    filepath = "./text8.txt"
    window_size = 5
    neg_num = 3

    # Read the corpus
    corpus = load_text(filepath)
    print("s1")
    print(corpus[:10])
    # Preprocess / tokenize the corpus
    corpus = word_preprocess(corpus)
    print("s2")
    print(corpus[:10])
    # Build the vocabulary and convert words to ids
    word2id_dict, word2id_freq, id2word_dict = word_freq2id(corpus)
    corpus = corpus2id(corpus, word2id_dict)
    print("s3")
    print(corpus[:10])
    # Subsampling (disabled here)
    # corpus = subsampling(corpus, word2id_freq)

    # Build (context, target) pairs
    contexts, targets = create_context_target(corpus, window_size)
    print("s6")
    print(contexts[:10])
    print("s7")
    print(targets[:10])
    # Negative sampling
    negative_sampler = NegativeSampler(word2id_dict, word2id_freq, id2word_dict, neg_num=neg_num)
    print("s8")
    cbow_dataset = CBOWDataset(contexts, targets, negative_sampler)

    cbow_dataloader = DataLoader(
        dataset=cbow_dataset,
        batch_size=10,
        shuffle=False,
        collate_fn=cbow_dataset.generate_batch,
    )

    for batch in tqdm(cbow_dataloader, total=len(cbow_dataloader)):
        pass

    # sg_dataset = SkipGramDataset(contexts, targets, negative_sampler)
    # sg_dataloader = DataLoader(
    #     dataset=sg_dataset,
    #     batch_size=10,
    #     shuffle=False,
    #     collate_fn=sg_dataset.generate_batch,
    # )
    #
    # for batch in tqdm(sg_dataloader, total=len(sg_dataloader)):
    #     pass


if __name__ == "__main__":
    test()


Training

trainer.py: visualizing the training process

import os
import torch
from tqdm import tqdm
from tensorboardX import SummaryWriter

class Trainer():
    def __init__(self,
                 model,
                 optimizer,
                 train_dataloader,
                 outputs_dir,
                 num_epochs,
                 device,
                 ):
        self.model = model
        self.optimizer = optimizer
        self.train_dataloader = train_dataloader
        self.outputs_dir = outputs_dir
        self.num_epochs = num_epochs
        self.device = device
        self.writer = SummaryWriter(outputs_dir)

    def train(self):
        model = self.model
        optimizer = self.optimizer
        train_dataloader = self.train_dataloader
        total_loss = 0

        for epoch in tqdm(range(self.num_epochs), total=self.num_epochs):
            epoch_loss = 0
            for idx, batch in tqdm(enumerate(train_dataloader), total=len(train_dataloader), leave=False,
                                   desc=f"Epoch {epoch + 1}"):
                inputs = {k: v.to(self.device) for k, v in batch.items()}

                loss = model(inputs)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()
                total_loss += loss.item()

                global_step = epoch * len(train_dataloader) + idx + 1
                avg_loss = total_loss / global_step
                self.writer.add_scalar("Train-Step-Loss", avg_loss, global_step=global_step)

            epoch_loss /= len(train_dataloader)
            self.writer.add_scalar("Train-Epoch-Loss", epoch_loss, global_step=epoch)
            for name, params in model.named_parameters():
                self.writer.add_histogram(name, params, global_step=epoch)

            save_name = f"model_{epoch}.pth"
            save_path = os.path.join(self.outputs_dir, save_name)
            torch.save(model.state_dict(), save_path)
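
The SummaryWriter above logs the step loss, the per-epoch loss and the parameter histograms as TensorBoard event files into outputs_dir; assuming TensorBoard is installed, they can be viewed by pointing tensorboard --logdir at that output directory during or after training.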
 

train.py: the actual training code

import os
import sys
import time
import torch
import pickle
from torch.utils.data import DataLoader

from tools import word2vec_trainer
from models.nlp import word2vec
from tools import word2vec_build_data
from tools.word2vec_build_data import *

def train_cbow():
    # path to the corpus (two directories up, under tools/)
    filepath = "../../tools/text8.txt"
    # Hyperparameters
    window_size = 5       # context window size
    embed_dim = 100       # word-vector dimension
    batch_size = 100      # batch size
    num_epochs = 10       # number of training epochs
    neg_num = 5           # number of negative samples per positive target
    learning_rate = 1e-3  # learning rate
    # Record the start time; it is used to name the output directory
    now_time = time.strftime("%Y%m%d-%H%M%S", time.localtime())
    outputs_dir = f"../outputs/cbow-{now_time}"
    os.makedirs(outputs_dir, exist_ok=True)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Read the corpus
    corpus = load_text(filepath)
    # Preprocess / tokenize the corpus
    corpus = word_preprocess(corpus)
    # Build the vocabulary and convert words to ids
    word2id_dict, word2freq_dict, id2word_dict = word_freq2id(corpus)
    corpus = corpus2id(corpus, word2id_dict)
    # Subsampling
    corpus = subsampling(corpus, word2freq_dict)
    # Build (context, target) pairs
    contexts, targets = create_context_target(corpus, window_size)
    # Vocabulary size
    vocab_size = len(word2id_dict)
    corpus_info = {
        "word2id": word2id_dict,
        "id2word": id2word_dict,
    }
    save_path = os.path.join(outputs_dir, "corpus_info.pkl")
    with open(save_path, "wb") as f:
        pickle.dump(corpus_info, f)
    # Negative sampling
    negative_sampler = NegativeSampler(word2id_dict, word2freq_dict, id2word_dict, neg_num)
    # Build the dataset with the custom Dataset class
    train_dataset = CBOWDataset(
        contexts=contexts,
        targets=targets,
        negative_sampler=negative_sampler,
    )

    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.generate_batch,
        num_workers=0,
        pin_memory=True,
    )

    model = word2vec.CBOW(vocab_size, embed_dim)
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    trainer = word2vec_trainer.Trainer(
        model=model,
        optimizer=optimizer,
        train_dataloader=train_dataloader,
        outputs_dir=outputs_dir,
        num_epochs=num_epochs,
        device=device,
    )

    trainer.train()


def train_skipgram():
    filepath = "../../tools/text8.txt"
    window_size = 5
    embed_dim = 100
    batch_size = 100
    num_epochs = 10
    negative_sample_size = 5
    learning_rate = 1e-3
    now_time = time.strftime("%Y%m%d-%H%M%S", time.localtime())
    outputs_dir = f"../outputs/skipgram-{now_time}/"
    os.makedirs(outputs_dir, exist_ok=True)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # The skip-gram path reuses the same preprocessing steps as CBOW
    corpus = load_text(filepath)
    corpus = word_preprocess(corpus)
    word2id, word2id_freq, id2word = word_freq2id(corpus)
    corpus = corpus2id(corpus, word2id)
    corpus = subsampling(corpus, word2id_freq)
    contexts, targets = create_context_target(corpus, window_size)
    vocab_size = len(word2id)

    corpus_info = {
        "corpus": corpus,
        "word2id": word2id,
        "id2word": id2word,
        "contexts": contexts,
        "targets": targets,
    }

    # Save the corpus info into this run's output directory (do not overwrite text8.txt)
    save_path = os.path.join(outputs_dir, "corpus_info.pkl")
    with open(save_path, "wb") as f:
        pickle.dump(corpus_info, f)

    negative_sampler = NegativeSampler(word2id, word2id_freq, id2word, negative_sample_size)

    train_dataset = SkipGramDataset(
        contexts=contexts,
        centers=targets,
        negative_sampler=negative_sampler,
    )

    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        collate_fn=train_dataset.generate_batch,
        num_workers=0,
        pin_memory=True,
    )
    model = word2vec.SkipGram(vocab_size, embed_dim)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # trainer = Trainer(
    #     model=model,
    #     optimizer=optimizer,
    #     train_dataloader=train_dataloader,
    #     outputs_dir=outputs_dir,
    #     num_epochs=num_epochs,
    #     device=device,
    # )
    #
    # trainer.train()

if __name__ == "__main__":
    os.chdir(sys.path[0])
    train_cbow()
   # train_skipgram()
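
The model classes imported from models.nlp.word2vec (CBOW and SkipGram) are not included in this post. For reference only, below is a minimal sketch (an assumption, not the original file) of a CBOW model with negative sampling that matches the batch dictionary produced by CBOWDataset.generate_batch and the way Trainer calls loss = model(inputs):

import torch
import torch.nn as nn

# Sketch only: the interface is assumed; the real models/nlp/word2vec.py may differ.
class CBOW(nn.Module):
    def __init__(self, vocab_size, embed_dim):
        super().__init__()
        # one embedding table for input (context) words, one for output (target/negative) words
        self.in_embed = nn.Embedding(vocab_size, embed_dim)
        self.out_embed = nn.Embedding(vocab_size, embed_dim)

    def forward(self, inputs):
        contexts = inputs["contexts"]      # (B, 2*window_size)
        targets = inputs["targets"]        # (B, 1 + neg_num): positive center word + negatives
        labels = inputs["labels"].float()  # (B, 1 + neg_num): 1 for the positive, 0 for negatives
        # average the context embeddings into one hidden vector per sample
        h = self.in_embed(contexts).mean(dim=1)               # (B, embed_dim)
        # score the positive target and each negative against the hidden vector
        out = self.out_embed(targets)                         # (B, 1 + neg_num, embed_dim)
        logits = torch.bmm(out, h.unsqueeze(-1)).squeeze(-1)  # (B, 1 + neg_num)
        # binary cross-entropy against the 1/0 labels gives the negative-sampling loss
        return nn.functional.binary_cross_entropy_with_logits(logits, labels)

A SkipGram counterpart would be analogous: embed the center word, score it against the context words plus their negative samples, and apply the same binary cross-entropy.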

