PyTorch学习笔记(七)RNN

# Imports
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration: prefer CUDA when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters.
# Each 28x28 MNIST image is treated as a sequence of 28 rows,
# each row being a 28-dimensional input vector.
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 10
learning_rate = 0.01

# MNIST datasets.
# download=True is required for a fresh environment; without it,
# torchvision raises "Dataset not found" when ../../data is empty.
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor(),
                                          download=True)

# Data loaders: shuffle only the training set.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# 循环神经网络模型 (many-to-one)
# Recurrent neural network model (many-to-one classification).
class RNN(nn.Module):
    """LSTM classifier: reads a (batch, seq_len, input_size) sequence and
    predicts one of `num_classes` labels from the last time step's hidden state.

    Args:
        input_size: dimensionality of each element of the input sequence.
        hidden_size: number of features in the LSTM hidden state.
        num_layers: number of stacked LSTM layers.
        num_classes: number of output classes.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> input/output tensors are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes) for input x
        of shape (batch, seq_len, input_size)."""
        # Initial hidden and cell states, allocated on the same device as
        # the input (fix: previously referenced a module-level global
        # `device`, which breaks if the model/input live elsewhere).
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)

        # Forward propagate the LSTM over the whole sequence.
        out, _ = self.lstm(x, (h0, c0))

        # Decode only the hidden state of the last time step (many-to-one).
        out = self.fc(out[:, -1, :])
        return out
# Instantiate the model and move it to the selected device.
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop.
total_step = len(train_loader)
for epoch in range(num_epochs):
    for step, (batch_images, batch_labels) in enumerate(train_loader):
        # Reshape each image into a (sequence_length, input_size) sequence.
        batch_images = batch_images.reshape(-1, sequence_length, input_size).to(device)
        batch_labels = batch_labels.to(device)

        # Forward pass.
        logits = model(batch_images)
        loss = criterion(logits, batch_labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Report progress every 100 steps.
        if (step + 1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' 
                   .format(epoch + 1, num_epochs, step + 1, total_step, loss.item()))
Epoch [1/10], Step [100/600], Loss: 0.4153
Epoch [1/10], Step [200/600], Loss: 0.4574
Epoch [1/10], Step [300/600], Loss: 0.2541
Epoch [1/10], Step [400/600], Loss: 0.1021
Epoch [1/10], Step [500/600], Loss: 0.3298
Epoch [1/10], Step [600/600], Loss: 0.2993
Epoch [2/10], Step [100/600], Loss: 0.2847
Epoch [2/10], Step [200/600], Loss: 0.1128
Epoch [2/10], Step [300/600], Loss: 0.0265
Epoch [2/10], Step [400/600], Loss: 0.0833
Epoch [2/10], Step [500/600], Loss: 0.0311
Epoch [2/10], Step [600/600], Loss: 0.0472
Epoch [3/10], Step [100/600], Loss: 0.0486
Epoch [3/10], Step [200/600], Loss: 0.0511
Epoch [3/10], Step [300/600], Loss: 0.0243
Epoch [3/10], Step [400/600], Loss: 0.1537
Epoch [3/10], Step [500/600], Loss: 0.0585
Epoch [3/10], Step [600/600], Loss: 0.0137
Epoch [4/10], Step [100/600], Loss: 0.0246
Epoch [4/10], Step [200/600], Loss: 0.0300
Epoch [4/10], Step [300/600], Loss: 0.0508
Epoch [4/10], Step [400/600], Loss: 0.1501
Epoch [4/10], Step [500/600], Loss: 0.0703
Epoch [4/10], Step [600/600], Loss: 0.0348
Epoch [5/10], Step [100/600], Loss: 0.1628
Epoch [5/10], Step [200/600], Loss: 0.0940
Epoch [5/10], Step [300/600], Loss: 0.0255
Epoch [5/10], Step [400/600], Loss: 0.1186
Epoch [5/10], Step [500/600], Loss: 0.0684
Epoch [5/10], Step [600/600], Loss: 0.1548
Epoch [6/10], Step [100/600], Loss: 0.0064
Epoch [6/10], Step [200/600], Loss: 0.0075
Epoch [6/10], Step [300/600], Loss: 0.0278
Epoch [6/10], Step [400/600], Loss: 0.0594
Epoch [6/10], Step [500/600], Loss: 0.0637
Epoch [6/10], Step [600/600], Loss: 0.2135
Epoch [7/10], Step [100/600], Loss: 0.0405
Epoch [7/10], Step [200/600], Loss: 0.0626
Epoch [7/10], Step [300/600], Loss: 0.0078
Epoch [7/10], Step [400/600], Loss: 0.0383
Epoch [7/10], Step [500/600], Loss: 0.0696
Epoch [7/10], Step [600/600], Loss: 0.0191
Epoch [8/10], Step [100/600], Loss: 0.0437
Epoch [8/10], Step [200/600], Loss: 0.0787
Epoch [8/10], Step [300/600], Loss: 0.0164
Epoch [8/10], Step [400/600], Loss: 0.1021
Epoch [8/10], Step [500/600], Loss: 0.0383
Epoch [8/10], Step [600/600], Loss: 0.0388
Epoch [9/10], Step [100/600], Loss: 0.0018
Epoch [9/10], Step [200/600], Loss: 0.0858
Epoch [9/10], Step [300/600], Loss: 0.1011
Epoch [9/10], Step [400/600], Loss: 0.1019
Epoch [9/10], Step [500/600], Loss: 0.0658
Epoch [9/10], Step [600/600], Loss: 0.0712
Epoch [10/10], Step [100/600], Loss: 0.1683
Epoch [10/10], Step [200/600], Loss: 0.0225
Epoch [10/10], Step [300/600], Loss: 0.0918
Epoch [10/10], Step [400/600], Loss: 0.0454
Epoch [10/10], Step [500/600], Loss: 0.0604
Epoch [10/10], Step [600/600], Loss: 0.0751
# Evaluate the trained model on the test set.
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for batch_images, batch_labels in test_loader:
        # Same reshaping as in training: image rows become a sequence.
        batch_images = batch_images.reshape(-1, sequence_length, input_size).to(device)
        batch_labels = batch_labels.to(device)
        logits = model(batch_images)
        # Predicted class = index of the max logit per sample.
        _, predicted = torch.max(logits.data, 1)
        total += batch_labels.size(0)
        correct += (predicted == batch_labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images:{}%.'
         .format(100 * correct / total))

# Save the trained weights.
torch.save(model.state_dict(), 'model.ckpt')
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

wydxry

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值