A convolutional neural network example in PyTorch (MNIST)

import torch
import os
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # workaround for the "OMP: Error #15" duplicate-OpenMP-runtime crash that can occur when PyTorch and matplotlib are used together

batch_size = 64
learning_rate = 0.01
momentum = 0.5
EPOCH = 20  # total number of training epochs
# Prepare the data.
# ToTensor converts the PIL images to tensors and scales pixel values to [0, 1];
# Normalize then standardizes them using the MNIST training-set mean (0.1307) and std (0.3081).
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_datasets = datasets.MNIST(root='./data/mnist', train=True, transform=transform, download=True)
test_datasets = datasets.MNIST(root='./data/mnist', train=False, transform=transform)
train_loader = DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_datasets, batch_size=batch_size, shuffle=False)
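
# Optional sanity check (an illustrative snippet, not from the original post):
# the Normalize constants (0.1307, 0.3081) are simply the global mean/std of the
# MNIST training pixels after scaling to [0, 1]. torchvision's MNIST dataset
# keeps the raw images as a uint8 tensor in its `.data` attribute.
pixels = train_datasets.data.float() / 255.0
print(pixels.mean().item(), pixels.std().item())  # ~0.1307, ~0.3081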


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 10, kernel_size=5),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(10, 20, kernel_size=5),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2)
        )
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(320, 50),  # 320 = 20 channels * 4 * 4, the feature shape after the two conv+pool blocks:
            # 28x28 -> conv(k=5) -> 24x24 -> pool(2) -> 12x12 -> conv(k=5) -> 8x8 -> pool(2) -> 4x4;
            # it can also be read off by printing x.shape after the conv layers in forward()
            torch.nn.Linear(50, 10),
        )

    def forward(self, x):
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(batch_size, -1)  # flatten to (batch_size, 320)
        # print(x.shape[1])  # uncomment to read off the flattened feature size
        x = self.fc(x)
        return x
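
# Quick shape check (an illustrative snippet, not from the original post):
# push one dummy 1x28x28 image through the two conv blocks to confirm the
# flattened feature size of 320 that the first Linear layer expects.
_probe = Net()
_x = _probe.conv2(_probe.conv1(torch.randn(1, 1, 28, 28)))
print(_x.view(1, -1).shape)  # torch.Size([1, 320])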


model = Net()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)  # move the model to the GPU if one is available

criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss; it applies log-softmax internally, so the model outputs raw logits
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)  # SGD with learning rate lr and momentum
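
# Illustrative check (not from the original post): CrossEntropyLoss expects raw
# logits of shape (N, C) and integer class labels of shape (N,), which is why
# Net has no softmax layer at the end.
_logits = torch.randn(3, 10)
_labels = torch.tensor([0, 7, 9])
print(criterion(_logits, _labels))  # a scalar loss tensor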

def train(epoch):
    running_loss = 0.0
    running_total = 0
    running_correct = 0
    model.train()  # not strictly needed here (no dropout/batchnorm), but good practice
    for batch_index, data in enumerate(train_loader):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)  # move the training batch to the GPU
        output = model(inputs)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()  # item() extracts the Python scalar held in a 0-dim tensor
        running_total += inputs.shape[0]
        _, predicted = torch.max(output.data, dim=1)
        running_correct += (predicted == target).sum().item()
        if batch_index % 300 == 299:  # printing every batch is wasteful, so report the average loss and accuracy once every 300 batches
            print('[%d, %5d]: loss: %.3f , acc: %.2f %%'
                  % (epoch + 1, batch_index + 1, running_loss / 300, 100 * running_correct / running_total))
            running_loss = 0.0  # reset the running loss for the next 300 batches
            running_total = 0
            running_correct = 0  # reset the running accuracy counters as well

def test(epoch):  # pass the epoch in explicitly rather than relying on the global from the main loop
    correct = 0
    total = 0
    model.eval()  # evaluation mode; a no-op for this model but good practice
    with torch.no_grad():  # no gradients needed during evaluation
        for data in test_loader:
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)  # move the test batch to the GPU
            output = model(inputs)
            _, predicted = torch.max(output.data, dim=1)  # index of the largest logit = predicted class
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    acc = correct / total  # test accuracy = correct predictions / total samples
    print('[%d / %d]: Accuracy on test set: %.1f %% ' % (epoch + 1, EPOCH, 100 * acc))
    return acc

if __name__ == '__main__':
    acc_list_test = []
    for epoch in range(EPOCH):
        train(epoch)
        # if epoch % 10 == 9:  # alternatively, test only once every 10 epochs
        acc_test = test(epoch)
        acc_list_test.append(acc_test)

    plt.plot(acc_list_test)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy On TestSet')
    plt.show()


