本文通过RNN学习hello->ohlol的规律
1.处理字符
因为字符不是数字,无法变成向量输入,所以必须将字符变成数字,这里用到了one-hot vector
通过将字符变成对应的索引,然后将索引变成one-hot vector,将其作为RNN的输入,这里因为只有4个字母,所以输入维度为4
这里要求第一个输出为o,第二个为h,第三个为l,所以这是一个多分类的问题,需要输出一个四维向量,每一维度表示为某个字母的概率
这样我们的每一个输入Xi就是one-hot vector的一行如[0 1 0 0]
2.通过RNNcell实现
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
import os
import sys
# Hyperparameters for the RNNCell version: 4-letter vocabulary, one sample.
input_size = 4
hidden_size = 4
batch_size = 1

# Vocabulary: index -> character.
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # "hello" as character indices
y_data = [3, 1, 2, 3, 2]  # "ohlol" as character indices

# Identity-matrix rows serve as the one-hot codes for the 4 characters.
one_hot_lookup = [
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
]
# Pick the matching one-hot row for every input index:
# [[0, 1, 0, 0],
#  [1, 0, 0, 0],
#  [0, 0, 1, 0],
#  [0, 0, 1, 0],
#  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[x] for x in x_data]
print(x_one_hot)

inputs = torch.Tensor(x_one_hot)
print(inputs.shape)  # torch.Size([5, 4])

# Reshape to (seq_len, batch_size, input_size); RNNCell consumes one step at a time.
inputs = inputs.view(-1, batch_size, input_size)
print(inputs.shape)  # torch.Size([5, 1, 4])

# Targets as a column so each step's label slice has shape (1,).
labels = torch.LongTensor(y_data).view(-1, 1)
print(labels.shape)  # torch.Size([5, 1])
# Model
class Model(torch.nn.Module):
    """Single-RNNCell wrapper: advances the hidden state one time step per call."""

    def __init__(self, input_size, hidden_size, batch_size):
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        # One recurrent cell; the sequence loop lives in the training code.
        self.rnncell = torch.nn.RNNCell(input_size=self.input_size,
                                        hidden_size=self.hidden_size)

    def forward(self, input, hidden):
        # New hidden state doubles as this step's class logits (hidden_size == 4).
        return self.rnncell(input, hidden)

    def init_hidden(self):
        # All-zero initial hidden state of shape (batch_size, hidden_size).
        return torch.zeros(self.batch_size, self.hidden_size)
net = Model(input_size, hidden_size, batch_size)

# Loss: per-step cross entropy on the hidden state, used directly as logits.
criterion = torch.nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

# Training: unroll the cell manually, one character per step, and
# accumulate the loss over the whole sequence before a single backprop.
for epoch in range(15):
    optimizer.zero_grad()
    hidden = net.init_hidden()
    total_loss = 0
    print('Predicted string :', end='')
    for step_input, step_label in zip(inputs, labels):
        hidden = net(step_input, hidden)
        total_loss += criterion(hidden, step_label)
        # Greedy decode: print the most probable character at this step.
        _, pred = hidden.max(dim=1)
        print(idx2char[pred.item()], end='')
    total_loss.backward()
    optimizer.step()
    print(',Epoch [%d/15] loss =%.4f' % (epoch + 1, total_loss.item()))
结果:
3.通过torch.RNN实现
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
import os
import sys
# Hyperparameters for the torch.nn.RNN version.
input_size = 4
hidden_size = 4
batch_size = 1
num_layers = 1
seq_len = 5

# Vocabulary: index -> character.
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # "hello" as character indices
y_data = [3, 1, 2, 3, 2]  # "ohlol" as character indices

one_hot_lookup = [
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
]
# x_one_hot takes row x of the lookup table for each index in x_data:
# [[0, 1, 0, 0],
#  [1, 0, 0, 0],
#  [0, 0, 1, 0],
#  [0, 0, 1, 0],
#  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[x] for x in x_data]
print(x_one_hot)

inputs = torch.Tensor(x_one_hot)
print(inputs.shape)  # torch.Size([5, 4])

inputs = inputs.view(-1, batch_size, input_size)
print(inputs.shape)  # torch.Size([5, 1, 4]) = (seq_len, batch_size, input_size)

# Flat label vector: CrossEntropyLoss will compare (seq_len, num_classes)
# logits against (seq_len,) targets, so no .view(-1, 1) here.
labels = torch.LongTensor(y_data)
print(labels.shape)  # torch.Size([5])
# Model
class Model(torch.nn.Module):
    """torch.nn.RNN wrapper: consumes the whole input sequence in one call."""

    def __init__(self, input_size, hidden_size, batch_size, num_layers):
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = torch.nn.RNN(input_size=self.input_size,
                                hidden_size=self.hidden_size,
                                num_layers=self.num_layers)

    def forward(self, input):
        # Zero initial hidden state: (num_layers, batch_size, hidden_size).
        h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
        output, _ = self.rnn(input, h0)
        # Flatten (seq_len, batch, hidden) -> (seq_len * batch, hidden)
        # so every time step becomes one row of class logits.
        return output.view(-1, self.hidden_size)
net = Model(input_size, hidden_size, batch_size, num_layers)

# Loss over all time steps at once.
criterion = torch.nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)

# Training: torch.nn.RNN unrolls the sequence internally, so each epoch
# is a single forward/backward pass over all five characters.
for epoch in range(15):
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    # Greedy decode every step at once and print the predicted string.
    _, pred = outputs.max(dim=1)
    pred = pred.data.numpy()
    print('Predicted: ', ''.join([idx2char[c] for c in pred]), end='')
    print(',Epoch [%d/15] loss =%.3f' % (epoch + 1, loss.item()))