# Building a CNN model by following the "Tudui" tutorial.
# Architecture:
#   Conv -> MaxPool
#   Conv -> MaxPool
#   Conv -> MaxPool
#   Flatten
#   Linear
#   Linear
# Trained with a loss function and an optimizer.
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Linear
from torch.nn.modules.flatten import Flatten
from torch.utils.data import DataLoader
# CIFAR-10 datasets, converted to tensors; downloaded on first run.
_to_tensor = torchvision.transforms.ToTensor()
test_data = torchvision.datasets.CIFAR10(
    "../data", train=False, transform=_to_tensor, download=True)
train_data = torchvision.datasets.CIFAR10(
    "../data", train=True, transform=_to_tensor, download=True)

# Batch the samples, 64 images per step.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
# Build the neural network.
class Qin(nn.Module):
    """CIFAR-10 classifier: three conv/pool stages followed by two linear layers.

    Input:  a batch of images shaped (N, 3, 32, 32).
    Output: raw class scores shaped (N, 10).
    """

    def __init__(self):
        super().__init__()
        # Spatial size shrinks 32 -> 16 -> 8 -> 4 through the three pools,
        # so the flattened feature vector is 64 channels * 4 * 4 = 1024.
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Run the batch through the full layer stack."""
        return self.model(x)
# Instantiate the network.
qin = Qin()

# Loss function: cross-entropy for 10-way classification.
loss_fn = nn.CrossEntropyLoss()

# Optimizer: plain SGD over all model parameters.
optimizer = torch.optim.SGD(qin.parameters(), lr=0.01)

# Bookkeeping for the training loop.
total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # test evaluations completed so far
epoch = 10            # number of passes over the training set
for i in range(epoch):
    print("第{}轮训练开始".format(i+1))

    # --- training pass ---
    qin.train()  # behavior-neutral here (no dropout/BN), but correct habit
    for data in train_dataloader:
        imgs, targets = data
        outputs = qin(imgs)
        loss = loss_fn(outputs, targets)

        # Standard backprop step: clear stale grads, backprop, update weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{},Loss:{}".format(total_train_step, loss.item()))

    # --- evaluation pass (no gradients needed) ---
    qin.eval()
    total_test_loss = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = qin(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
    print("整体测试集上的Loss:{}".format(total_test_loss))
    # Fix: total_test_step was initialized earlier but never advanced.
    total_test_step = total_test_step + 1
# Keep it up! — Qingqing Zijin