The ResNet20 architecture for CIFAR-10 from the ResNet paper

A ResNet20 implementation for CIFAR-10, following the structure described in the ResNet paper; it reliably reaches around 92% test accuracy, in line with the original paper's results.

  1. resnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F

class ResidualBlock(nn.Module):
    """Basic residual block: two 3x3 convs, each followed by BatchNorm, plus a skip connection."""
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        # identity shortcut by default; a 1x1 projection conv when the spatial size or channel count changes
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out = self.left(x)
        out += self.shortcut(x)
        out = F.relu(out)
        return out

class ResNet(nn.Module):
    def __init__(self, block, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 16
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
        )
        # three stages of n=3 blocks each: 6n+2 = 20 weighted layers in total
        self.layer1 = self.make_layer(block, 16, 3, stride=1)
        self.layer2 = self.make_layer(block, 32, 3, stride=2)
        self.layer3 = self.make_layer(block, 64, 3, stride=2)
        self.fc = nn.Linear(64, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)   # only the first block may downsample, e.g. [2, 1, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)   # 16 channels, 32x32
        out = self.layer2(out)   # 32 channels, 16x16
        out = self.layer3(out)   # 64 channels, 8x8
        out = F.avg_pool2d(out, 8)       # global average pooling over the 8x8 map
        out = out.view(out.size(0), -1)  # flatten to (batch, 64)
        out = self.fc(out)
        return out


def ResNet20():
    return ResNet(ResidualBlock)
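
As a quick sanity check (a minimal sketch, not part of the original files), the snippet below builds the model, counts its trainable parameters, and pushes a dummy CIFAR-sized batch through it. ResNet20 should come out at roughly 0.27M parameters, matching the figure reported in the ResNet paper.

import torch
from resnet import ResNet20

model = ResNet20()

# ResNet20 has roughly 0.27M trainable parameters
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('trainable parameters: {:.2f}M'.format(n_params / 1e6))

# a fake batch of 4 RGB images at CIFAR-10's 32x32 resolution
x = torch.randn(4, 3, 32, 32)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([4, 10])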

  2. read_data.py

import torch
import torchvision
from torch import nn
from torchvision import datasets,transforms
from torch.utils.data import DataLoader   # DataLoader handles batching and shuffling

def read_cifar10(batchsize,data_dir):
    # training-set augmentation and normalization
    transform_train = transforms.Compose([
                                    # transforms.RandomRotation(15),  # random rotation (unused; RandomRotation requires a degrees argument)
                                    transforms.RandomCrop(32, padding=4),  # pad by 4 pixels, then take a random 32x32 crop
                                    transforms.RandomHorizontalFlip(p=0.5),  # random horizontal flip
                                    # transforms.Resize((32,32)),
                                    transforms.ToTensor(),
                                    # transforms.ColorJitter(brightness=1),  # color jitter: brightness (unused)
                                    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],std=[0.2023, 0.1994, 0.2010])])

    transform_test = transforms.Compose([
                                    # transforms.Resize((32,32)),
                                    transforms.ToTensor(),  # Q1 scaling: ToTensor converts a PIL.Image with values in [0,255], or an (H,W,C) numpy.ndarray, into a [C,H,W] torch.FloatTensor with values in [0,1.0]
                                    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],std=[0.2023, 0.1994, 0.2010])])  # Q2 mean/std: the three RGB channels are standardized separately
    '''
    torchvision.transforms.Compose acts as a container that chains multiple transforms: it takes a list
    whose elements are the operations applied, in order, to the loaded data. After the Normalize step each
    channel has roughly zero mean and unit standard deviation; this standardizes the data but does not
    make it normally distributed.
    '''
    # load the CIFAR-10 datasets (downloaded on first run)
    data_train = datasets.CIFAR10(root=data_dir,
                                  train=True,
                                  transform=transform_train,
                                  download=True)

    data_test = datasets.CIFAR10(root=data_dir,
                                 train=False,
                                 transform=transform_test,
                                 download=True
                                 )
    # wrap the datasets in DataLoaders
    data_loader_train = DataLoader(dataset=data_train,
                                   batch_size=batchsize,
                                   shuffle=True,    # reshuffle the training data every epoch
                                   pin_memory=True)   # pin_memory=True speeds up host-to-GPU copies when RAM is plentiful; set it to False if the system stalls or swaps heavily
                                   #drop_last=True)   # True drops the last incomplete batch (the len(dataset) % batch_size leftover); False keeps it
    data_loader_test = DataLoader(dataset=data_test,
                                  batch_size=128,
                                  shuffle=False,
                                  pin_memory=True)
    return data_loader_train,data_loader_test
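
A minimal usage sketch (assuming read_data.py is on the path and './data' is writable, as in train.py below) to confirm the loaders produce the expected shapes:

from read_data import read_cifar10

train_loader, test_loader = read_cifar10(batchsize=128, data_dir='./data')

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([128, 3, 32, 32])
print(labels.shape)  # torch.Size([128])
print(len(train_loader.dataset), len(test_loader.dataset))  # 50000 10000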
  3. train.py
import torch
import time
from torch import nn
# import matplotlib
# import matplotlib.pyplot as plt

from resnet import ResNet20
# from transformer_resnet import ResNet20
from read_data import read_cifar10
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1,2'  # make only GPUs 1 and 2 visible to this process

def main():
    since = time.time()
    data_dir = './data'
    model_dir = './cnn'
    batchsize = 128
    n_epochs = 200
    best_acc = 0.0    # best test accuracy seen so far
    best_acc_loc = 0  # epoch at which best_acc occurred
    Lr = 0.1

    data_loader_train,data_loader_test = read_cifar10(batchsize,data_dir)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ResNet20().to(device)
    print(model)
    # split the parameters into weights and biases so that weight decay penalizes only the weights
    weight_p, bias_p = [],[]
    for name, p in model.named_parameters():
        if 'bias' in name:
            bias_p +=[p]
        else:
            weight_p +=[p]
    cost = nn.CrossEntropyLoss().to(device)      # Q3: should an explicit regularization term be added?

    # L1 regularization (left as a commented-out experiment)
    # regularization_loss = 0
    # for param in model.parameters():
    #     regularization_loss += torch.sum(torch.abs(param))
    #
    # optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    optimizer = torch.optim.SGD([{'params': weight_p, 'weight_decay': 1e-4},
                                 {'params': bias_p, 'weight_decay': 0}], lr=Lr, momentum=0.9)  # the built-in weight_decay is L2 regularization applied to every parameter; decaying the biases as well easily leads to underfitting, so normally only the weights are regularized
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[82, 122, 163], gamma=0.1, last_epoch=-1)  # Q4: how many epochs does the paper's schedule correspond to? epochs = iterations / (50000 / batch_size) for 32000, 48000, 64000 iterations
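    # To answer Q4 above: with batch_size = 128, one epoch is 50000/128 ≈ 391 iterations,
    # so the paper's learning-rate drops at 32k/48k/64k iterations correspond to roughly
    # 32000/391 ≈ 82, 48000/391 ≈ 123, and 64000/391 ≈ 164 epochs, which the milestones
    # [82, 122, 163] approximate.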

    Loss_list = []
    Accuracy_list = []
    for epoch in range(n_epochs):

        model.train()

        training_loss = 0.0
        training_acc = 0.0   # running count of correct predictions this epoch
        print("Epoch {}/{}".format(epoch+1,n_epochs))
        print("-"*30)

        total_train = 0
        for i,data in enumerate(data_loader_train):
            x,labels = data
            x,labels = x.to(device), labels.to(device)

            # print(x.shape)
            # print(label.shape)
            # forward pass: compute outputs and loss
            outputs = model(x)
            loss = cost(outputs, labels)
            training_loss += loss.item()
            # print(outputs)
            _,pred = torch.max(outputs,1)  # predicted label = index of the largest logit
            total_train += labels.size(0)
            num_correct = (pred == labels).sum()
            training_acc += num_correct.item()

            # backward pass + optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # print('Epoch:', epoch, 'train loss:', training_loss/len(data_loader_train))
        # Loss_list.append(training_loss/len(data_loader_train))
            if i % 100 == 99:
                print('[%d, %5d] training_loss: %f' % (epoch + 1, i + 1, training_loss / 100))
                Loss_list.append(training_loss / 100)
                training_loss = 0.0
        print('Train acc:{:.4}%'.format(100*training_acc/total_train))

        scheduler.step()

        model.eval()
        testing_correct = 0
        total = 0
        with torch.no_grad():
            for data in data_loader_test:
                x_test, label_test = data
                x_test, label_test = x_test.to(device), label_test.to(device)
                outputs = model(x_test)
                _,pred = torch.max(outputs.data,1)
                total += label_test.size(0)
                testing_correct += (pred == label_test).sum().item()
        print('Test acc: {:.4}%'.format(100*testing_correct/total))
        Accuracy_list.append(100*testing_correct/total)
        acc = 100*testing_correct/total
        if acc > best_acc:
            best_acc = acc
            best_acc_loc = epoch

    print('test best acc: {}% at epoch {}'.format(best_acc, best_acc_loc))
    time_used = time.time() - since
    print('-' * 30)
    print('Training time: {:.0f}m {:.0f}s'.format(time_used // 60, time_used % 60))
    # print('Best test accuracy: {}%'.format(best_acc))

    # save the model weights, optimizer state, and final epoch; note these are the
    # final-epoch weights, while the file name records the best accuracy reached
    state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
    os.makedirs(model_dir, exist_ok=True)  # make sure './cnn' exists before saving
    torch.save(state, os.path.join(model_dir, '{}best_acc.pth'.format(best_acc)))

if __name__ == '__main__':
    main()
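
To reuse the saved checkpoint later, something like the sketch below works; note that the '.pth' file name here is a made-up example, since the real name embeds whatever best accuracy the run reached:

import torch
from resnet import ResNet20

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNet20().to(device)

# train.py saves {'model': ..., 'optimizer': ..., 'epoch': ...}
state = torch.load('./cnn/92.1best_acc.pth', map_location=device)  # hypothetical file name
model.load_state_dict(state['model'])
model.eval()  # put BatchNorm layers into inference mode before evaluating
print('checkpoint from epoch', state['epoch'])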

Questions and discussion are welcome in the comments; let's learn from each other.
