import torch
from torch.nn import functional as F
from torchvision import transforms,datasets
from torch import nn,optim
from torch.utils.data import DataLoader
# Model definition: classic LeNet-5 adapted to 3-channel 32x32 inputs (CIFAR-10).
class Lenet5(nn.Module):
    """LeNet-5 CNN: two conv + avg-pool stages followed by a 3-layer MLP head.

    Input:  float tensor of shape [b, 3, 32, 32]
    Output: raw class logits of shape [b, 10]
    """

    def __init__(self):
        super(Lenet5, self).__init__()
        # Feature extractor: [b, 3, 32, 32] -> [b, 16, 5, 5]
        # (32 -conv5-> 28 -pool2-> 14 -conv5-> 10 -pool2-> 5)
        self.conv_unit = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
        )
        # Classifier head: flattened 16*5*5 = 400 features -> 10 class logits.
        self.fc_unit = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        """Run a batch [b, 3, 32, 32] through the network; return [b, 10] logits."""
        features = self.conv_unit(x)
        flat = features.view(features.size(0), 16 * 5 * 5)
        return self.fc_unit(flat)
# Load the CIFAR-10 dataset (downloaded to ./cifar on first run).
# The same preprocessing pipeline is shared by the train and test splits
# (the original duplicated it inline for each split).
_transform = transforms.Compose([
    transforms.Resize([32, 32]),  # CIFAR-10 images are already 32x32; kept as a safeguard
    transforms.ToTensor(),
])
data_train = datasets.CIFAR10('cifar', train=True, transform=_transform, download=True)
data_train = DataLoader(data_train, batch_size=32, shuffle=True)
data_test = datasets.CIFAR10('cifar', train=False, transform=_transform, download=True)
data_test = DataLoader(data_test, batch_size=32, shuffle=True)
# Sanity check: inspect one batch (fixes the original's misspelled `lable`).
x, label = next(iter(data_train))
print("x:", x.shape, " label:", label)
# Training setup.
# Prefer the GPU when one is available, otherwise fall back to the CPU.
# (The original hard-coded 'cuda' and crashed on CPU-only machines.)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Lenet5().to(device)
# CrossEntropyLoss expects raw logits and integer class labels.
criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(model)
for epoch in range(10):  # number of epochs; adjust as needed
    # --- training phase ---
    model.train()
    for batch_idx, (x, label) in enumerate(data_train):  # fixes misspelled `btach`
        x = x.to(device)
        label = label.to(device)
        logits = model(x)
        loss = criteon(logits, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Loss of the last batch only — a rough per-epoch progress indicator.
    print('epoch:', epoch, ' loss:', loss.item())
    # --- evaluation phase ---
    with torch.no_grad():  # no gradients needed for evaluation
        model.eval()
        total_correct = 0  # fixes misspelled `totalcorrect`/`toatalnum` pair
        total_num = 0
        for x, label in data_test:
            x = x.to(device)
            label = label.to(device)
            logits = model(x)
            pred = logits.argmax(dim=1)  # predicted class = index of max logit
            total_correct += torch.eq(pred, label).float().sum().item()
            total_num += x.size(0)
        acc = total_correct / total_num
        print('epoch:', epoch, ' acc:', acc)