Classifying the CIFAR-10 dataset with a residual network (ResNet)

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms


learning_rate = 0.001
num_epochs = 50             # learning rate and number of training epochs

# Image Preprocessing
train_transform = transforms.Compose([
    transforms.Resize(40),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32),
    transforms.ToTensor(),
    transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])
])

test_transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])
])  # augment the training set and normalize both sets: ToTensor converts images
    # to tensors in [0, 1], and Normalize(mean=0.5, std=0.5) rescales to [-1, 1]

# CIFAR-10 Dataset
train_dataset = dsets.CIFAR10(
    root='./data',
    train=True,
    transform=train_transform,
    download=True
)
test_dataset = dsets.CIFAR10(
    root='./data',
    train=False,
    transform=test_transform,
    download=True
)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=50,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=50,
                                          shuffle=False)  # download CIFAR-10 via torchvision and wrap both splits in DataLoaders
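A quick sanity check (not part of the original post): pull one batch from the loader and confirm its shape, given the batch_size of 50 above.

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([50, 3, 32, 32]) after RandomCrop(32)
print(labels.shape)   # torch.Size([50])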


# 3x3 Convolution
def conv3x3(in_channels, out_channels, stride=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)  # the 3x3 conv layer used throughout the network
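With kernel_size=3 and padding=1, the output size follows floor((H + 2 - 3) / stride) + 1, so stride 1 preserves the spatial size and stride 2 halves it. An illustrative check (example tensors, not in the original):

x = torch.randn(1, 16, 32, 32)
print(conv3x3(16, 16, stride=1)(x).shape)   # torch.Size([1, 16, 32, 32])
print(conv3x3(16, 32, stride=2)(x).shape)   # torch.Size([1, 32, 16, 16])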


# Residual Block
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # inplace=True overwrites the input tensor instead of allocating a new one,
        # saving memory and allocation time; the numerical result is unchanged.
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample:  # project the shortcut when the output shape changes
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)

        return out  # a basic residual block: two 3x3 convolutions plus a skip connection
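Why the downsample branch matters: when stride != 1 or the channel count changes, the identity x no longer matches out, so a projection shortcut must bring it to the same shape before the addition. A minimal sketch (example values, not from the original):

ds = nn.Sequential(conv3x3(16, 32, stride=2), nn.BatchNorm2d(32))
block = ResidualBlock(16, 32, stride=2, downsample=ds)
print(block(torch.randn(1, 16, 32, 32)).shape)   # torch.Size([1, 32, 16, 16])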


# ResNet Model
class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.in_channels = 16
        self.conv = conv3x3(3, 16)
        self.bn = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self.make_layer(block, 16, layers[0])
        self.layer2 = self.make_layer(block, 32, layers[1], 2)
        self.layer3 = self.make_layer(block, 64, layers[2], 2)
        self.avg_pool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64, num_classes)

    def make_layer(self, block, out_channels, blocks, stride=1):
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(out_channels)
            )
        layers = []
        layers.append(block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for i in range(1, blocks):
            layers.append(block(out_channels, out_channels))

        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)

        return out                  # the full residual network: stem conv, three residual stages, pooling, classifier
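The sizes explain AvgPool2d(8) and nn.Linear(64, num_classes): a 32x32 input stays 32x32 through layer1, shrinks to 16x16 in layer2 and 8x8 in layer3, and the 8x8 average pool leaves a single 64-channel value per image. A dummy forward pass to confirm (illustrative only):

net = ResNet(ResidualBlock, [2, 2, 2])
print(net(torch.randn(4, 3, 32, 32)).shape)   # torch.Size([4, 10])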


model = ResNet(ResidualBlock, [2, 2, 2])  # two residual blocks per stage
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)  # SGD with momentum, paired with cross-entropy loss
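As written, the script runs on the CPU. A hedged sketch of the usual GPU setup (assumes a CUDA device is available; each batch inside the loops below would also need the same .to(device) call):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# inside the loops: img, label = img.to(device), label.to(device)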

# Training
for epoch in range(num_epochs):                    # iterate over the dataset num_epochs times
    for train_img, train_label in train_loader:    # feed mini-batches to the model
        out = model(train_img)                     # forward pass; batches are already [N, 3, 32, 32]
        loss = criterion(out, train_label)
        optimizer.zero_grad()    # clear accumulated gradients
        loss.backward()          # backpropagate
        optimizer.step()         # update the parameters
    if (epoch + 1) % 10 == 0:    # report progress every 10 epochs
        print('*' * 10)
        print('epoch {}'.format(epoch + 1))
        print('loss: {:.4f}'.format(loss.item()))


model.eval()             # switch to evaluation mode (affects BatchNorm)
eval_loss = 0
eval_acc = 0
with torch.no_grad():    # no gradients are needed at test time
    for img, label in test_loader:
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)              # predicted class = index of the largest logit
        eval_acc += (pred == label).sum().item()
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_dataset), eval_acc / len(test_dataset)))   # report the final results
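To keep the trained weights (a step the original post does not include; the filename is an assumption), the usual approach is to save the state dict:

torch.save(model.state_dict(), 'resnet_cifar10.pth')   # hypothetical checkpoint file
# later: model.load_state_dict(torch.load('resnet_cifar10.pth'))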

Adapted from "深度学习之PyTorch" (Deep Learning with PyTorch) by Liao Xingyu (廖星宇).
