2020-11-04

Learning Convolutional Neural Networks

I got through today's tasks very quickly and efficiently, which left me some time to write a blog post recording what I learned. I'm an ultimate lazybones, though, so this may amount to little more than dumping the code here.
Parts of the code carry messy, disorganized comments; I meant to write them properly but lost the thread partway through, so feel free to skim past them for easier reading.

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch.nn.functional as F

# Use the GPU if one is available, otherwise fall back to the CPU
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# The transforms in the list are applied to each sample in order:
# ToTensor scales pixels to [0, 1]; Normalize((0.5,), (0.5,)) then maps them to [-1, 1]
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,)),]
)

# Load MNIST, applying the transform declared above
trainSet = datasets.MNIST(root='./data', download=True, train=True, transform=transform)
testSet = datasets.MNIST(root='./data', download=True, train=False, transform=transform)
trainLoader = DataLoader(trainSet, batch_size=64, shuffle=True)
testLoader = DataLoader(testSet, batch_size=64, shuffle=False)
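
# Optional sanity check (a quick sketch): pull one batch and confirm shapes and
# value range; after Normalize the pixels should sit roughly in [-1, 1]
images, labels = next(iter(trainLoader))
print(images.shape)                              # torch.Size([64, 1, 28, 28])
print(labels.shape)                              # torch.Size([64])
print(images.min().item(), images.max().item())  # approximately -1.0 and 1.0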

def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """

    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float() #convert into float for division
    acc = correct.sum() / len(correct)
    return acc
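
# Note: binary_accuracy above is a leftover helper for binary classification and
# is not called below. MNIST has 10 classes, so the multi-class equivalent takes
# the argmax over class scores instead of rounding a sigmoid. A minimal sketch:
def categorical_accuracy(preds, y):
    """Fraction of samples whose highest-scoring class matches the label."""
    top_pred = preds.argmax(dim=1)
    return (top_pred == y).float().mean()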

# Multilayer perceptron (fully connected layers + ReLU)
class MLPNet(nn.Module):
    def __init__(self):
        super(MLPNet, self).__init__()
        self.fc1 = nn.Linear(28*28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        x = x.view(-1, 28*28)  # flatten each 28x28 image into a 784-dim vector
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def name(self):
        return "MLP"

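# Quick shape check (a sketch): the MLP should map any batch of 1x28x28 images
# to a batch of 10 logits, thanks to the view() flattening in forward()
print(MLPNet()(torch.randn(2, 1, 28, 28)).shape)  # torch.Size([2, 10])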

# LeNet, one of the earliest CNNs: two conv+pool stages followed by fully connected layers
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1x28x28 -> 20x24x24
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 20x12x12 -> 50x8x8
        self.fc1 = nn.Linear(4*4*50, 500)     # flattened 50x4x4 feature map
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def name(self):
        return "LeNet"

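# Where does 4*4*50 come from? Each 5x5 conv (stride 1, no padding) shrinks the
# spatial size by 4 and each 2x2 max-pool halves it: 28 -> 24 -> 12 -> 8 -> 4,
# with 50 channels after conv2. A quick sketch to verify:
net = LeNet()
x = F.max_pool2d(F.relu(net.conv1(torch.randn(1, 1, 28, 28))), 2, 2)  # 1x20x12x12
x = F.max_pool2d(F.relu(net.conv2(x)), 2, 2)                          # 1x50x4x4
print(x.shape)  # torch.Size([1, 50, 4, 4])
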
# Pick the network to train; swap in MLPNet() to compare the two
model = LeNet().to(device)

# Hyperparameters: 3 epochs; learning rate 0.002; cross-entropy loss; stochastic gradient descent with momentum ("inertia")
epochs = 3
lr = 0.002
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
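# Adam is a common drop-in alternative if SGD with momentum converges slowly
# (a hypothetical swap, not what this run uses):
# optimizer = optim.Adam(model.parameters(), lr=1e-3)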

# Training loop

for epoch in range(epochs):
    running_loss = 0.0
    batches_since_report = 0

    for idx, data in enumerate(trainLoader):
        inputs, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        predictions = model(inputs)
        loss = criterion(predictions, labels)
        loss.backward()
        optimizer.step()
        # Report the average loss every 100 batches (and at the end of each epoch)
        running_loss += loss.item()
        batches_since_report += 1
        if idx % 100 == 99 or idx+1 == len(trainLoader):
            print('[%d/%d, %d/%d] loss: %.3f' % (epoch+1, epochs, idx+1, len(trainLoader),
                                                 running_loss / batches_since_report))
            running_loss = 0.0
            batches_since_report = 0

print('Training Finished.')

# Evaluation
correct = 0
total = 0

# No gradients are needed for evaluation, so skip autograd bookkeeping
model.eval()  # good practice, though this model has no dropout or batch-norm layers
with torch.no_grad():
    for data in testLoader:
        inputs, labels = data[0].to(device), data[1].to(device)
        predictions = model(inputs)
        # torch.max over dim=1 returns (values, indices); the indices are the predicted digits
        _, predicted = torch.max(predictions, dim=1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)

print('Accuracy of the network on the 10000 test images: %.2f %%' % (100.0 * correct / total))
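
# Optional extension (a sketch): break the accuracy down per digit to see which
# classes the network confuses most
# class_correct, class_total = [0] * 10, [0] * 10
# with torch.no_grad():
#     for inputs, labels in testLoader:
#         inputs, labels = inputs.to(device), labels.to(device)
#         _, predicted = torch.max(model(inputs), dim=1)
#         for p, l in zip(predicted, labels):
#             class_total[l.item()] += 1
#             class_correct[l.item()] += int(p.item() == l.item())
# for d in range(10):
#     print('digit %d: %.1f %%' % (d, 100.0 * class_correct[d] / max(class_total[d], 1)))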

torch.save(model.state_dict(), model.name() + '.pt')  # e.g. writes "LeNet.pt"
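
# To reuse the weights later, rebuild the model and load the saved state dict:
# model = LeNet().to(device)
# model.load_state_dict(torch.load(model.name() + '.pt', map_location=device))
# model.eval()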

Yep, that's me all right, exactly this lazy.
