8.bidirectional_recurrent_neural_network

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters: each 28x28 MNIST image is read as a sequence of 28 rows,
# each row being a 28-dimensional input vector
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 1
learning_rate = 0.001

# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data/',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data/',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Bidirectional recurrent neural network (many-to-one)
class BiRNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BiRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, bidirectional=True)
        self.fc = nn.Linear(hidden_size * 2, num_classes)  # *2 for the two directions

    def forward(self, x):
        # Initial hidden and cell states: (num_layers * 2 directions, batch, hidden_size)
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(device)

        # Forward propagate the LSTM; out: (batch_size, seq_length, hidden_size * 2)
        out, _ = self.lstm(x, (h0, c0))

        # Decode the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out
model = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)
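# Optional sanity check (a minimal sketch; 'dummy' is just an illustrative name):
# a random batch of shape (batch_size, sequence_length, input_size) should yield
# logits of shape (batch_size, num_classes), i.e. (100, 10) with the settings above.
dummy = torch.randn(batch_size, sequence_length, input_size).to(device)
print(model(dummy).shape)  # expected: torch.Size([100, 10])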
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape each image to a sequence: (batch_size, sequence_length, input_size)
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

# Test the model
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
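To reuse the trained weights later, the saved state dict can be loaded back into a freshly constructed BiRNN before running predictions. The snippet below is a minimal sketch; it assumes the 'model.ckpt' path written by the save call above.

# Reload the checkpoint for inference (optional sketch)
model = BiRNN(input_size, hidden_size, num_layers, num_classes).to(device)
model.load_state_dict(torch.load('model.ckpt', map_location=device))
model.eval()  # switch to evaluation mode before making predictions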
