Liu Er's (刘二大人) 《PyTorch深度学习实践》 (PyTorch Deep Learning Practice) Complete Series, Lesson 12: Recurrent Neural Networks (Basics)

import torch
import matplotlib.pyplot as plt

batch_size = 1
input_size = 4
hidden_size = 4

idx2char = ['e','h','l','o']
x_data = [1,0,2,2,3]   # input string "hello"
y_data = [3,1,2,3,2]   # target string "ohlol"

one_hot_lookup = [[1,0,0,0],
                  [0,1,0,0],
                  [0,0,1,0],
                  [0,0,0,1]]


x_one_hot = [one_hot_lookup[x] for x in x_data] # x_one_hot is a list of one-hot rows: [[0,1,0,0],[1,0,0,0],[0,0,1,0],[0,0,1,0],[0,0,0,1]]
inputs = torch.Tensor(x_one_hot).view(-1,batch_size,input_size) #torch.Size([5, 1, 4])
labels = torch.LongTensor(y_data).view(-1,1) #torch.Size([5, 1])
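
As a side note, the manual lookup table can be replaced by torch.nn.functional.one_hot on recent PyTorch versions (1.1+). A minimal sketch; inputs_alt is my own name, and the result matches the inputs tensor above:

import torch.nn.functional as F

x_tensor = torch.LongTensor(x_data)                        # shape (5,)
inputs_alt = F.one_hot(x_tensor, num_classes=input_size)   # shape (5, 4), dtype long
inputs_alt = inputs_alt.float().view(-1, batch_size, input_size)  # (5, 1, 4), same as inputs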

class Model(torch.nn.Module):
    def __init__(self,input_size,hidden_size,batch_size):
        super(Model,self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnncell = torch.nn.RNNCell(input_size=self.input_size,hidden_size=self.hidden_size)
    def forward(self,input,hidden):
        hidden = self.rnncell(input,hidden)
        return hidden
    def init_hidden(self):                                              # build the initial hidden state h0, shape [batch_size, hidden_size]
        return torch.zeros(self.batch_size,self.hidden_size)

model = Model(input_size,hidden_size,batch_size)
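
For comparison, the same task can be written with torch.nn.RNN, which consumes the whole (seqLen, batch, input_size) sequence in one call instead of stepping one character at a time. A minimal sketch along the lines the course also presents (the class name RNNModel and the num_layers argument are my own choices):

class RNNModel(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size, num_layers=1):
        super(RNNModel, self).__init__()
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.hidden_size = hidden_size
        self.rnn = torch.nn.RNN(input_size=input_size, hidden_size=hidden_size,
                                num_layers=num_layers)

    def forward(self, input):
        # h0 has shape (num_layers, batch_size, hidden_size)
        hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
        out, _ = self.rnn(input, hidden)        # out: (seqLen, batch_size, hidden_size)
        return out.view(-1, self.hidden_size)   # (seqLen*batch_size, hidden_size), ready for CrossEntropyLoss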

# Construct the loss function and optimizer using the torch API
criterion = torch.nn.CrossEntropyLoss()   # the instructor used torch.nn.BCELoss(size_average=False), but for me that loss came out too large
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # lr is the learning rate; 0.01 was too small, so I raised it to 0.1
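
CrossEntropyLoss combines LogSoftmax and NLLLoss, so it takes raw scores of shape (N, C) and class indices of shape (N,); no softmax is applied to the hidden state before the loss. A quick shape check (the scores and target here are made-up numbers for illustration):

scores = torch.Tensor([[0.1, 2.0, -1.0, 0.3]])      # (batch=1, num_classes=4): raw, unnormalized scores
target = torch.LongTensor([1])                      # class index 1, i.e. 'h'
print(torch.nn.CrossEntropyLoss()(scores, target))  # same as -log_softmax(scores, dim=1)[0, 1]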

#plot
x_axis = []
y_axis = []

for epoch in range(10):
    optimizer.zero_grad()
    hidden = model.init_hidden()    # hidden: torch.Size([1, 4]), i.e. [batch_size, hidden_size]
    loss = 0
    print('Predicted string:',end='')
    for input,label in zip(inputs,labels):              # input: torch.Size([1, 4]), label: torch.Size([1])
        hidden = model(input,hidden)                    # feed one character per step; the hidden state is carried along the sequence
        loss += criterion(hidden,label)
        _,idx = hidden.max(dim=1)                       # max over dim=1 (the hidden_size dimension) gives the predicted character index per row
        print(idx2char[idx.item()],end='')
    loss.backward()
    optimizer.step()
    print(', Epoch [%d/10] loss=%.4f' % (epoch+1,loss.item()))
    x_axis.append(epoch)
    y_axis.append(loss.item())
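
If you use the RNNModel sketch above instead of the cell, the inner per-character loop disappears: the whole sequence goes through in one call and the loss is computed once per epoch. A hedged example of one such epoch (model2 and optimizer2 are my own names, and this assumes the RNNModel sketch from earlier):

model2 = RNNModel(input_size, hidden_size, batch_size)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=0.1)
for epoch in range(10):
    optimizer2.zero_grad()
    outputs = model2(inputs)                    # (5, 4): one row of scores per character
    loss = criterion(outputs, labels.view(-1))  # labels flattened to shape (5,)
    loss.backward()
    optimizer2.step()
    _, idx = outputs.max(dim=1)
    print('Predicted string:' + ''.join(idx2char[i.item()] for i in idx),
          ', Epoch [%d/10] loss=%.4f' % (epoch + 1, loss.item()))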
# plot the training loss over epochs
plt.figure(figsize=(7, 7), dpi=80)  # create the figure
plt.plot(x_axis, y_axis, color='b', linestyle='-')  # plot the loss curve as a solid line
plt.xlabel('epoch')  # x-axis label
plt.ylabel('loss')  # y-axis label (the curve shows the loss, not an accuracy rate)
plt.legend(["loss"], title='loss&epoch', loc='upper right', fontsize=15)  # legend
plt.show()

D:\Anaconda\envs\study\python.exe "D:\python pycharm learning\刘二大人课程\P\P12.py" 
Predicted string:ooooe, Epoch [1/10] loss=7.9872
Predicted string:ooooe, Epoch [2/10] loss=6.8451
Predicted string:olool, Epoch [3/10] loss=5.9409
Predicted string:olool, Epoch [4/10] loss=5.3361
Predicted string:ollol, Epoch [5/10] loss=4.9198
Predicted string:oolol, Epoch [6/10] loss=4.5413
Predicted string:oolol, Epoch [7/10] loss=4.1052
Predicted string:oolol, Epoch [8/10] loss=3.6482
Predicted string:ohlol, Epoch [9/10] loss=3.3346
Predicted string:ohlol, Epoch [10/10] loss=3.2157

Process finished with exit code 0
 

By epoch = 10 the model already produces the target string ohlol.
