import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
# The defining feature of an RNN: the current input is processed together with
# information carried over from earlier inputs
batch_size, num_steps = 32, 35
# train_iter is the data iterator, vocab the vocabulary
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
# [0, 2] are the token indices 0 and 2; len(vocab) is the vocabulary size,
# so each token is encoded as a one-hot vector of that length
print(vocab)
print(F.one_hot(torch.tensor([0, 2]), len(vocab)))
X = torch.arange(10).reshape((2, 5))
print(F.one_hot(X.T, 28).shape)
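# Quick sanity check: transposing X before one-hot encoding makes the tensor
# time-major, so its first axis indexes time steps rather than batch elements
assert F.one_hot(X.T, 28).shape == (5, 2, 28)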
# vocab_size: size of the vocabulary; device: whether to run on CPU or GPU
def get_params(vocab_size, num_hiddens, device):
    # Input and output sizes both equal vocab_size: the input is a one-hot
    # token, and the next token we predict can be any token in the vocabulary
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    # Hidden layer parameters
    W_xh = normal((num_inputs, num_hiddens))   # maps the input X into the hidden layer
    W_hh = normal((num_hiddens, num_hiddens))  # maps the previous hidden state to the next
    b_h = torch.zeros(num_hiddens, device=device)  # one bias per hidden unit
    # Output layer parameters
    W_hq = normal((num_hiddens, num_outputs))  # maps the hidden state to the output
    b_q = torch.zeros(num_outputs, device=device)  # output layer bias
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)  # track gradients so the parameters can be trained
    return params
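# Rough size check, assuming len(vocab) == 28 for the time machine dataset and
# num_hiddens == 512: the list holds 28*512 + 512*512 + 512 + 512*28 + 28
# values, about 0.29M, dominated by the hidden-to-hidden matrix W_hh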
# Initialize the hidden state: at time step 0 there is no previous hidden
# state, so we return a zero-filled tensor of shape (batch_size, num_hiddens)
def init_rnn_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device),)
# The forward computation
def rnn(inputs, state, params):
    # inputs has shape (num_steps, batch_size, vocab_size),
    # so the loop below runs once per time step
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state  # unpack the initial hidden state
    outputs = []
    for X in inputs:  # iterate over the time dimension
        # torch.mm is matrix multiplication; H is the previous step's hidden state
        H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
        Y = torch.mm(H, W_hq) + b_q
        outputs.append(Y)  # Y is the prediction at the current time step
    # Concatenate the per-step outputs into a single 2-D matrix and return the
    # updated hidden state along with it
    return torch.cat(outputs, dim=0), (H,)
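# The recurrence implemented above, written out per time step t:
#   H_t = tanh(X_t @ W_xh + H_{t-1} @ W_hh + b_h)
#   O_t = H_t @ W_hq + b_q
# Stacking the O_t along dim=0 yields a (num_steps * batch_size, vocab_size) matrix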
# Wrapper class around the functions above
class RNNModelScratch:  #@save
    """An RNN model implemented from scratch."""
    def __init__(self, vocab_size, num_hiddens, device,
                 get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, X, state):  # X has shape (batch_size, num_steps)
        X = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        # Forward pass; here forward_fn is the rnn function defined above
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size, device):  # build the initial state
        return self.init_state(batch_size, self.num_hiddens, device)
# No training has happened yet; we have only assembled the model and run a shape check
num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
state = net.begin_state(X.shape[0], d2l.try_gpu())
Y, new_state = net(X.to(d2l.try_gpu()), state)
print(Y.shape, len(new_state), new_state[0].shape)
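# With X of shape (2, 5) and len(vocab) == 28 this should print
# torch.Size([10, 28]) 1 torch.Size([2, 512]): the output is
# (num_steps * batch_size, vocab_size), and the state tuple holds one tensor
# of shape (batch_size, num_hiddens)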
# Define a prediction function that generates new characters after a prefix
def predict_ch8(prefix, num_preds, net, vocab, device):
    """Generate num_preds new characters following the prefix string."""
    state = net.begin_state(batch_size=1, device=device)  # initial state
    outputs = [vocab[prefix[0]]]  # index of the first prefix character
    # outputs[-1] feeds the most recent token back in as the next input
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    for y in prefix[1:]:  # warm-up period: character 0 is already in outputs
        _, state = net(get_input(), state)  # update the state, discard the output
        outputs.append(vocab[y])  # store the known prefix characters, not predictions
    for _ in range(num_preds):  # predict num_preds steps
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))  # greedy: take the most likely token
    return ''.join([vocab.idx_to_token[i] for i in outputs])  # indices back to tokens

print(predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()))  # predict 10 more characters
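# The network is still untrained, so the generated continuation is gibberish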
# Gradient clipping
def grad_clipping(net, theta):
    """Clip the gradient norm to at most theta."""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    # Global L2 norm: square each gradient, sum everything, take the square root
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:  # gradient too large: rescale it
        for param in params:
            param.grad[:] *= theta / norm
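# The clipping rule above, written out:
#   g <- min(1, theta / ||g||) * g
# It caps the global gradient norm at theta without changing the gradient's
# direction, which keeps backpropagation through time from exploding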
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train the network for one epoch (defined in Chapter 8)."""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration, or on every iteration
            # when using random sampling (consecutive minibatches are unrelated)
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                # state is a single tensor for nn.GRU
                state.detach_()
            else:
                # state is a tuple of tensors for nn.LSTM and for our
                # from-scratch model; detach each one from the old graph
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)  # feed the inputs and the carried-over state
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)  # clip gradients before the update
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # batch_size=1 because mean() has already been applied to the loss
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
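# The first return value is the perplexity, the exponential of the average
# per-token cross-entropy:
#   ppl = exp(metric[0] / metric[1])
# the second is the throughput in tokens per second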
def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    """Train the model (defined in Chapter 8)."""
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    # Initialization: check whether net is an nn.Module to pick the updater
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    # Training and prediction
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:  # print a sample prediction every 10 epochs
            print(predict('my dream is'))
            animator.add(epoch + 1, [ppl])
    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
    print(predict('my dream is'))
num_epochs, lr = 200, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())
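# For comparison, the use_random_iter branch in train_epoch_ch8 can be
# exercised by retraining with random sampling, e.g. (sketch):
# net_random = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(),
#                              get_params, init_rnn_state, rnn)
# train_ch8(net_random, train_iter, vocab, lr, num_epochs, d2l.try_gpu(),
#           use_random_iter=True)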