Preface
I read an article on Zhihu, NLP基础模型(1)——NNLM, and really liked how it was structured, so I plan to borrow that approach for these notes.
I. The TextLSTM model
Traditional RNN: because of vanishing gradients it effectively has only short-term memory, and its repeating module is simple, e.g. a single tanh layer.
LSTM (long short-term memory): combines long- and short-term memory through a carefully designed gating mechanism, which alleviates the vanishing-gradient problem to some extent (a minimal sketch of one cell step follows below).
For the implementation details of each part of the LSTM, see the reference material.
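To make the gating concrete, here is a minimal, self-contained sketch of a single LSTM cell step. This is illustration only, not the article's code: nn.LSTM performs the equivalent computation internally, and the function name and weight layout below (PyTorch's i, f, g, o gate ordering) are assumptions made for this sketch.

import torch

def lstm_cell_step(x_t, h_prev, c_prev, W_ih, W_hh, b):
    # x_t: [batch, input_size]; h_prev / c_prev: [batch, hidden_size]
    # W_ih: [4*hidden, input_size]; W_hh: [4*hidden, hidden_size]; b: [4*hidden]
    gates = x_t @ W_ih.T + h_prev @ W_hh.T + b
    i, f, g, o = gates.chunk(4, dim=1)  # input, forget, cell, output gates
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)           # candidate cell state
    c_t = f * c_prev + i * g    # long-term memory: forget part of the old state, add new information
    h_t = o * torch.tanh(c_t)   # short-term memory, also the cell's output
    return h_t, c_t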
II. Implementation
The key questions to consider when studying a deep learning model are:
- What is the model's task?
- What is the input data?
- How is the input data embedded?
- What is the shape after embedding?
- What is the network architecture?
- Layer by layer, what is each layer's main function?
- What is the formula for each layer?
- What is the loss function?
- What is the optimizer?
- What is the output data?
- What is the shape of the output data?
- What is the evaluation metric?
1. Data preprocessing
- Model task: this post implements TextLSTM with a simple dataset and a simple network. The training set contains 10 words, and the first three characters of each word are used to predict its last character.
- Input data: the first three characters of each word in the dataset, e.g. 'mak', 'nee'.
- Create the data and the dictionaries: build a character dictionary over all the training data.
char_arr = [c for c in 'abcdefghijklmnopqrstuvwxyz']
word_dict = {n: i for i, n in enumerate(char_arr)}
number_dict = {i: w for i, w in enumerate(char_arr)}
n_class = len(word_dict) # number of class(=number of vocab)
seq_data = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']
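A quick illustration (not in the original post) of what the two dictionaries contain:

print(word_dict['m'], word_dict['a'], word_dict['k'])  # 12 0 10
print(number_dict[4])                                   # e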
- Create the batch
def make_batch():
    input_batch, target_batch = [], []
    for seq in seq_data:
        input = [word_dict[n] for n in seq[:-1]]  # 'm', 'a', 'k' is the input
        target = word_dict[seq[-1]]  # 'e' is the target
        input_batch.append(np.eye(n_class)[input])  # np.eye(n) builds an n x n identity matrix; indexing it with `input` yields one-hot rows
        target_batch.append(target)
    return input_batch, target_batch
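A quick sanity check of the batch shapes (illustrative, assuming the definitions above have already run):

input_batch, target_batch = make_batch()
print(np.array(input_batch).shape)                    # (10, 3, 26) = [batch_size, n_step, n_class]
print(target_batch[0], number_dict[target_batch[0]])  # 4 e  -> the target for 'make' is 'e'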
- Define the network hyperparameters
n_step = 3 # number of cells(= number of Step)
n_hidden = 128 # number of hidden units in one cell
2. Build the network
class TextLSTM(nn.Module):
    def __init__(self):
        super(TextLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden)
        self.W = nn.Linear(n_hidden, n_class, bias=False)
        self.b = nn.Parameter(torch.ones([n_class]))

    def forward(self, X):
        input = X.transpose(0, 1)  # X : [batch_size, n_step, n_class] -> input : [n_step, batch_size, n_class]
        hidden_state = torch.zeros(1, len(X), n_hidden)  # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        cell_state = torch.zeros(1, len(X), n_hidden)  # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
        outputs = outputs[-1]  # last time step : [batch_size, n_hidden]
        model = self.W(outputs) + self.b  # model : [batch_size, n_class]
        return model
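A quick shape check of the forward pass (illustrative only; it assumes the hyperparameters and dictionaries above are defined, and the dummy tensor is just a placeholder batch):

dummy = torch.zeros(10, n_step, n_class)  # [batch_size, n_step, n_class]
logits = TextLSTM()(dummy)
print(logits.shape)                       # torch.Size([10, 26]) = [batch_size, n_class]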
- Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
nn.CrossEntropyLoss applies (log-)softmax internally, so the network outputs raw logits and no softmax layer is needed.
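A small illustrative check (not from the original post) that CrossEntropyLoss on raw logits matches NLLLoss applied to log_softmax of the same logits:

import torch.nn.functional as F

logits = torch.randn(4, n_class)           # [batch_size, n_class]
targets = torch.randint(0, n_class, (4,))  # class indices
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))             # True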
3. Training the model
for epoch in range(1000):
    optimizer.zero_grad()
    output = model(input_batch)
    loss = criterion(output, target_batch)
    if (epoch + 1) % 100 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    loss.backward()
    optimizer.step()

inputs = [sen[:3] for sen in seq_data]
predict = model(input_batch).data.max(1, keepdim=True)[1]
print(inputs, '->', [number_dict[n.item()] for n in predict.squeeze()])
# torch.max(1)[1] returns, for each row, the index of its maximum element; .data returns only the data part of the tensor
# .item() extracts the Python scalar from a one-element tensor
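A small example of what max(1, keepdim=True)[1] does (illustrative only):

t = torch.tensor([[0.1, 0.9], [0.7, 0.3]])
print(t.max(1)[1])                # tensor([1, 0]) -- index of the maximum in each row
print(t.max(1, keepdim=True)[1])  # tensor([[1], [0]]) -- same indices, with the dimension kept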
Code
The complete code is as follows:
# %%
# code by Tae Hwan Jung @graykode
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import snoop

def make_batch():
    input_batch, target_batch = [], []
    for seq in seq_data:
        input = [word_dict[n] for n in seq[:-1]]  # 'm', 'a', 'k' is the input
        target = word_dict[seq[-1]]  # 'e' is the target
        input_batch.append(np.eye(n_class)[input])
        target_batch.append(target)
    return input_batch, target_batch

class TextLSTM(nn.Module):
    def __init__(self):
        super(TextLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden)
        self.W = nn.Linear(n_hidden, n_class, bias=False)
        self.b = nn.Parameter(torch.ones([n_class]))

    def forward(self, X):
        input = X.transpose(0, 1)  # X : [batch_size, n_step, n_class] -> input : [n_step, batch_size, n_class]
        hidden_state = torch.zeros(1, len(X), n_hidden)  # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        cell_state = torch.zeros(1, len(X), n_hidden)  # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
        outputs = outputs[-1]  # last time step : [batch_size, n_hidden]
        model = self.W(outputs) + self.b  # model : [batch_size, n_class]
        return model

if __name__ == '__main__':
    n_step = 3  # number of cells (= number of steps)
    n_hidden = 128  # number of hidden units in one cell

    char_arr = [c for c in 'abcdefghijklmnopqrstuvwxyz']
    word_dict = {n: i for i, n in enumerate(char_arr)}
    number_dict = {i: w for i, w in enumerate(char_arr)}
    n_class = len(word_dict)  # number of classes (= vocabulary size)

    seq_data = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']

    model = TextLSTM()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    input_batch, target_batch = make_batch()
    input_batch = torch.FloatTensor(input_batch)
    target_batch = torch.LongTensor(target_batch)

    # Training
    for epoch in range(1000):
        optimizer.zero_grad()
        output = model(input_batch)
        loss = criterion(output, target_batch)
        if (epoch + 1) % 100 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
        loss.backward()
        optimizer.step()

    inputs = [sen[:3] for sen in seq_data]
    predict = model(input_batch).data.max(1, keepdim=True)[1]
    print(inputs, '->', [number_dict[n.item()] for n in predict.squeeze()])
Output:
['mak', 'nee', 'coa', 'wor', 'lov', 'hat', 'liv', 'hom', 'has', 'sta'] -> ['e', 'd', 'l', 'd', 'e', 'e', 'e', 'e', 'h', 'r']
References