《PyTorch深度学习实践》卷积神经网络

下采样:降低运算需求

用view来展开

并且每个卷积核通道的值可以不同。输入的通道数等于卷积核的通道数。输出通道等于卷积核的个数

三通道的公式

输入通道数是n,卷积核的个数是m,故输出通道数等于卷积核的个数m,与单个卷积核自身的通道数无关(它必须等于输入通道数n)。

定义的时候注意,输入通道,输出通道,及卷积核的大小

MaxPool2d默认步长等于池化核大小,因此MaxPool2d(2)的步长为2

如果代码里面没有写就默认stride等于1,padding为0

GPU运算

  1. 模型迁入GPU

2.将张量移入GPU

import matplotlib.pyplot as plt
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as ply

## 1. Prepare the MNIST datasets and their loaders.
batch_size = 64

# PIL image -> tensor, then normalize with MNIST's per-channel mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Training split: shuffled each epoch, loaded by 4 worker processes.
train_dataset = datasets.MNIST(root='./dataset/mnist/', train=True,
                               download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size,
                          shuffle=True, num_workers=4)

# Test split: fixed order for reproducible evaluation.
test_dataset = datasets.MNIST(root='./dataset/mnist/', train=False,
                              download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size,
                         shuffle=False, num_workers=4)

##2.构建模型
class Net(torch.nn.Module):
    """Small CNN for 28x28 MNIST digits: two conv+pool stages, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        # Grayscale input (1 channel) -> 10 feature maps -> 20 feature maps.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # 2x2 max pooling; stride defaults to the kernel size, i.e. 2.
        self.pooling = torch.nn.MaxPool2d(2)
        # For a 28x28 input the two conv+pool stages yield 20 x 4 x 4 = 320 features.
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        n_samples = x.size(0)
        # Two identical conv -> pool -> ReLU stages.
        for conv in (self.conv1, self.conv2):
            x = F.relu(self.pooling(conv(x)))
        # Flatten to (batch, 320) and produce 10 class logits
        # (no softmax: CrossEntropyLoss expects raw logits).
        return self.fc(x.view(n_samples, -1))
model = Net()
# Move the model to the GPU when one is available, otherwise stay on CPU;
# input batches are moved to the same device inside train()/te().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

## 3. Loss function and optimizer
# CrossEntropyLoss combines log-softmax and NLL loss, so Net.forward
# returns raw logits rather than probabilities.
criterion = torch.nn.CrossEntropyLoss()
# Plain SGD with momentum; hyperparameters follow the course example.
optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum = 0.5)

##4.模型训练
def train(epoch):
    """Run one training epoch over train_loader, printing the mean loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Keep the batch on the same device as the model.
        inputs = inputs.to(device)
        target = target.to(device)

        optimizer.zero_grad()
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report the running average once per 300 mini-batches, then reset it.
        if (batch_idx + 1) % 300 == 0:
            print("[%d, %5d] loss: %.3f" % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.0

def te():
    """Evaluate model on test_loader; print and return the accuracy fraction."""
    correct = 0
    total = 0
    # Inference only: disable autograd bookkeeping.
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            # The predicted class is the index of the largest logit per row.
            predicted = outputs.data.max(dim=1)[1]

            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print("accuracy on test set: %d %%" % (100*correct/total))
    return correct/total

if __name__ == "__main__":
    # Train for 10 epochs, evaluating on the test set after each one,
    # then plot accuracy versus epoch.
    epoch_list = []
    acc_list = []
    for epoch in range(10):
        train(epoch)
        epoch_list.append(epoch)
        acc_list.append(te())

    plt.plot(epoch_list, acc_list)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.show()
























  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值