Obtaining Word Vectors with a Feed-Forward Neural Network Language Model (FFNNLM): A Source-Code Implementation

Building the Vocabulary
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Filename :vocab.py
@Description :
@Datetime :2021/08/24 17:02:51
@Author :qtxu
@Version :v1.0
'''

from collections import defaultdict,Counter

class Vocab:
    def __init__(self, tokens=None):
        self.idx_to_token = list()
        self.token_to_idx = dict()

        if tokens is not None:
            # Make sure the unknown-word marker is always in the vocabulary
            if "<unk>" not in tokens:
                tokens = tokens + ["<unk>"]

            for token in tokens:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1
            self.unk = self.token_to_idx["<unk>"]

    @classmethod
    def build(cls, text, min_freq=1, reserved_tokens=None):
        # Count token frequencies over the whole corpus
        token_freqs = defaultdict(int)
        for sentence in text:
            for token in sentence:
                token_freqs[token] += 1
        # <unk> and any reserved tokens come first, followed by every token
        # whose frequency is at least min_freq
        uniq_tokens = ["<unk>"] + (reserved_tokens if reserved_tokens else [])
        uniq_tokens += [token for token, freq in token_freqs.items()
                        if freq >= min_freq and token != "<unk>"]
        return cls(uniq_tokens)

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, token):
        # Unknown tokens fall back to the index of <unk>
        return self.token_to_idx.get(token, self.unk)

    def convert_tokens_to_ids(self, tokens):
        return [self[token] for token in tokens]

    def convert_ids_to_tokens(self, indices):
        return [self.idx_to_token[index] for index in indices]


def save_vocab(vocab, path):
    with open(path, 'w') as writer:
        writer.write("\n".join(vocab.idx_to_token))


def read_vocab(path):
    with open(path, 'r') as f:
        tokens = f.read().split('\n')
    return Vocab(tokens)
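
As a quick sanity check, the Vocab class can be exercised on a toy corpus. This is a minimal illustrative sketch (the two sentences and the reserved <pad> token are arbitrary choices, not part of the Reuters pipeline below):

from vocab import Vocab

# Toy corpus: a list of tokenized sentences (purely illustrative)
corpus = [["the", "cat", "sat"], ["the", "dog", "barked"]]
vocab = Vocab.build(corpus, min_freq=1, reserved_tokens=["<pad>"])

print(len(vocab))                                    # vocabulary size, including <unk> and <pad>
print(vocab.convert_tokens_to_ids(["the", "bird"]))  # unseen "bird" maps to the <unk> index
print(vocab.convert_ids_to_tokens([vocab["cat"]]))   # ['cat']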
Data Preparation
# -*- coding: utf-8 -*-
# @Time    : 2021-08-24 16:39
# @Author  : XAT
# @FileName: utils.py
# @Software: PyCharm

import torch
from torch.utils.data import DataLoader, Dataset, TensorDataset
from vocab import Vocab
from nltk.corpus import reuters  # Reuters corpus reader provided by NLTK


# Constants
BOS_TOKEN = "<bos>"  # beginning-of-sentence marker
EOS_TOKEN = "<eos>"  # end-of-sentence marker
PAD_TOKEN = "<pad>"  # padding marker used to equalize sequence lengths
BOW_TOKEN = "<bow>"  # beginning-of-word marker (not used in this example)
EOW_TOKEN = "<eow>"  # end-of-word marker (not used in this example)

WEIGHT_INIT_RANGE = 0.1


def load_reuters():
    # reuters.sents() returns the corpus as a list of already tokenized sentences
    text = reuters.sents()
    # Lowercase every word (an optional normalization step)
    text = [[word.lower() for word in sentence] for sentence in text]
    # Build the vocabulary, passing in the reserved special tokens
    vocab = Vocab.build(text, reserved_tokens=[PAD_TOKEN, BOS_TOKEN, EOS_TOKEN])
    # Convert the text into id sequences using the vocabulary
    corpus = [vocab.convert_tokens_to_ids(sentence) for sentence in text]

    return corpus, vocab


def save_pretrained(vocab, embeds, save_path):
    with open(save_path, "w") as writer:
        # First line records the vocabulary size and the embedding dimension
        writer.write(f"{embeds.shape[0]} {embeds.shape[1]}\n")
        for idx, token in enumerate(vocab.idx_to_token):
            vec = " ".join(["{:.4f}".format(x) for x in embeds[idx]])
            # Each remaining line holds one token followed by its space-separated vector
            writer.write(f"{token} {vec}\n")
    print(f"Pretrained embeddings saved to: {save_path}")


def load_pretrained(load_path):
    with open(load_path, "r") as fin:
        # First line: vocabulary size and embedding dimension
        n, d = map(int, fin.readline().split())
        tokens = []
        embeds = []
        for line in fin:
            line = line.rstrip().split(' ')
            token, embed = line[0], list(map(float, line[1:]))
            tokens.append(token)
            embeds.append(embed)
        vocab = Vocab(tokens)
        embeds = torch.tensor(embeds, dtype=torch.float)
    return vocab, embeds


def get_loader(dataset, batch_size, shuffle=True):
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        collate_fn=dataset.collate_fn,
        shuffle=shuffle
    )
    return data_loader


def init_weights(model):
    # Initialize every non-embedding parameter uniformly in a small range
    for name, param in model.named_parameters():
        if "embedding" not in name:
            torch.nn.init.uniform_(
                param, a=-WEIGHT_INIT_RANGE, b=WEIGHT_INIT_RANGE)
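
Note that load_reuters depends on NLTK corpus data that is not bundled with the nltk package itself. If the Reuters corpus has not been fetched yet, a one-time download along these lines is needed (a minimal sketch assuming the standard nltk.download mechanism; punkt is the sentence tokenizer that reuters.sents() relies on):

import nltk

nltk.download('reuters')  # the Reuters corpus read by load_reuters()
nltk.download('punkt')    # sentence tokenizer used by reuters.sents()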

Model Implementation
# -*- coding: utf-8 -*-
# @Time    : 2021-08-24 16:35
# @Author  : XAT
# @FileName: 5-1.py
# @Software: PyCharm

from vocab import Vocab
from tqdm.auto import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils import BOS_TOKEN, EOS_TOKEN, BOW_TOKEN, EOW_TOKEN
from torch.utils.data import DataLoader, Dataset, TensorDataset
from utils import load_reuters, load_pretrained, save_pretrained, get_loader, init_weights

#  ---------------- Data --------------------------
# A subclass of Dataset (defined in torch.utils.data) that builds and stores
# the training data for the feed-forward neural network language model
class NGramDataset(Dataset):
    def __init__(self, corpus, vocab, context_size=2):
        self.data = []
        self.bos = vocab[BOS_TOKEN]
        self.eos = vocab[EOS_TOKEN]
        for sentence in tqdm(corpus, desc="Dataset Construction"):
            # Insert the beginning- and end-of-sentence markers
            sentence = [self.bos] + sentence + [self.eos]
            # Skip sentences shorter than the predefined context size
            if len(sentence) < context_size:
                continue
            for i in range(context_size, len(sentence)):
                # Model input: the context_size words preceding position i
                context = sentence[i-context_size:i]
                # Model output: the current word
                target = sentence[i]
                # Each training sample is a (context, target) pair
                self.data.append((context, target))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def collate_fn(self, examples):
        # Assemble a batch of inputs and targets from individual samples and
        # convert them to PyTorch tensors
        inputs = torch.tensor([ex[0] for ex in examples], dtype=torch.long)
        targets = torch.tensor([ex[1] for ex in examples], dtype=torch.long)
        return (inputs, targets)
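
# Illustration of the resulting samples: with context_size=2, an id sequence
# [bos, w1, w2, w3, eos] yields the pairs ([bos, w1], w2), ([w1, w2], w3) and
# ([w2, w3], eos), i.e. each position is predicted from the two preceding tokens.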


#  -------------------- Model ------------------------
# Feed-forward neural network language model. Its parameters consist of the word
# embeddings plus the linear transformations from the embedding layer to the
# hidden layer and from the hidden layer to the output layer.
class FeedForwardNNLM(nn.Module):
    def __init__(self, vocab_size, embedding_dim, context_size, hidden_dim):
        super(FeedForwardNNLM, self).__init__()
        # Word embedding layer
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Linear transformation: embedding layer --> hidden layer
        self.linear1 = nn.Linear(context_size * embedding_dim, hidden_dim)
        # Linear transformation: hidden layer --> output layer
        self.linear2 = nn.Linear(hidden_dim, vocab_size)
        # ReLU activation function
        self.activate = F.relu
        init_weights(self)

    def forward(self, inputs):
        # Map the input word ids to embeddings, then use view to reshape the
        # resulting 3-D tensor so that the context embeddings are concatenated
        embeds = self.embeddings(inputs).view((inputs.shape[0], -1))
        hidden = self.activate(self.linear1(embeds))
        output = self.linear2(hidden)
        # Convert the output logits into log-probabilities with log_softmax so
        # that the negative log-likelihood loss can be computed directly
        log_probs = F.log_softmax(output, dim=1)
        return log_probs
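
# Shape walk-through: inputs has shape (batch_size, context_size); the embedding
# lookup yields (batch_size, context_size, embedding_dim); view flattens this to
# (batch_size, context_size * embedding_dim), which matches linear1's input size.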


#  ---------------------------- Training --------------------------
# Once the data and the model have been built, the model can be trained and the
# word embedding matrix exported after training finishes.
# Hyperparameter settings
embedding_dim = 64
context_size = 2
hidden_dim = 128
batch_size = 1064
num_epoch = 10


# Load the text data and build the FFNNLM training set (n-grams)
corpus, vocab = load_reuters()
dataset = NGramDataset(corpus, vocab, context_size)
data_loader = get_loader(dataset, batch_size)
# Negative log-likelihood loss
nll_loss = nn.NLLLoss()
# Build the FFNNLM and move it to the target device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = FeedForwardNNLM(len(vocab), embedding_dim, context_size, hidden_dim)
model.to(device)
# Adam optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)

model.train()
total_losses = []
for epoch in range(num_epoch):
    total_loss = 0
    for batch in tqdm(data_loader, desc=f"Training Epoch {epoch}"):
        inputs, targets = [x.to(device) for x in batch]
        optimizer.zero_grad()
        log_probs = model(inputs)
        loss = nll_loss(log_probs, targets)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Loss: {total_loss:.2f}")
    total_losses.append(total_loss)

# Save the word embeddings (model.embeddings) to the ffnnlm.vec file
save_pretrained(vocab, model.embeddings.weight.data, "5.1 ffnnlm.vec")
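
After training, the saved vector file can be loaded back with load_pretrained and inspected, for example by listing the nearest neighbours of a word under cosine similarity. The snippet below is a minimal sketch (the query word "bank" and k=6 are arbitrary illustrative choices):

import torch
from utils import load_pretrained

vocab, embeds = load_pretrained("5.1 ffnnlm.vec")

def knn(embeds, query_vec, k):
    # Cosine similarity between the query vector and every embedding row
    sims = torch.cosine_similarity(embeds, query_vec.unsqueeze(0), dim=1)
    values, indices = sims.topk(k)
    return values, indices

query = "bank"  # any in-vocabulary token works here
values, indices = knn(embeds, embeds[vocab[query]], k=6)
for sim, idx in zip(values.tolist(), indices.tolist()):
    print(f"{vocab.convert_ids_to_tokens([idx])[0]}: {sim:.3f}")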
    