Week J2: ResNet50V2 in Practice and Analysis

  1. Based on the TensorFlow code in the original article, write the corresponding PyTorch code
  2. Understand the difference between ResNetV2 and ResNet

# I. Preliminary Work

  • Language environment: Python 3.9.18
  • Editor: Jupyter Lab
  • Deep learning framework: PyTorch 1.12.1

1. Set up the GPU

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms,datasets

import os,PIL,random,pathlib

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device

2. Import the data

data_dir = "F:/365data/J3/"
data_dir = pathlib.Path(data_dir)

data_path = list(data_dir.glob('*'))
classeNames = [path.name for path in data_path]  # class names come from the sub-folder names (avoids the brittle Windows-path split)
classeNames
train_transforms = transforms.Compose([
    transforms.Resize([224,224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.485,0.456,0.406],
        std = [0.229,0.224,0.225]
    )
])

test_transforms = transforms.Compose([
    transforms.Resize([224,224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean = [0.485,0.456,0.406],
        std = [0.229,0.224,0.225]
    )
])

total_data = datasets.ImageFolder("F:/365data/J3/",transform = train_transforms)
total_data
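As a quick sanity check (not in the original post), ImageFolder also records the class-to-index mapping it inferred from the sub-folder names, which should line up with classeNames:

print(total_data.class_to_idx)  # dict mapping each class folder name to its label index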

3. Split into training and test sets
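The DataLoaders below reference train_dataset and test_dataset, but the post omits the split itself. A minimal sketch using random_split, assuming the 8:2 ratio commonly used in this series:

train_size = int(0.8 * len(total_data))   # assumed 8:2 train/test split
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])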

batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size = batch_size,
                                       shuffle = True,
                                       num_workers = 1)

test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size = batch_size,
                                      shuffle = True,
                                      num_workers = 1)
for X,y in test_dl:
    print('Shape of X:',X.shape)
    print('Shape of y:',y.shape,y.dtype)
    break

4. Data visualization

import matplotlib.pyplot as plt 
from PIL import Image

image_folder = 'F:/365data/J1/bird_photos/Bananaquit/'

image_files = [f for f in os.listdir(image_folder) if f.endswith(('.jpg','.jpeg','.png'))]

fig,axes = plt.subplots(3,8,figsize=(16,6))

for ax,img_file in zip(axes.flat,image_files):
    img_path = os.path.join(image_folder,img_file)
    img = PIL.Image.open(img_path)
    ax.imshow(img)
    ax.axis('off')

plt.tight_layout()
plt.show()

# II. Building the ResNet-50V2 Model

1. Building the model

import torch.nn.functional as F

# block1: pre-activation bottleneck with a 1x1 convolutional shortcut (used where the channel count changes)
class block1(nn.Module):
    def __init__ (self,channels,stride = 1):
        super(block1,self).__init__()
        channel1,channel2 = channels
        self.bn1 = nn.Sequential(
            nn.BatchNorm2d(channel1),
            nn.ReLU()
        )
        self.conv1 = nn.Sequential(
            nn.Conv2d(channel1,channel2,kernel_size=1),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.ZeroPad2d(1),
            nn.Conv2d(channel2,channel2,kernel_size=3),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.Conv2d(channel2,channel2*4,kernel_size=1)
        )
        self.conv2 = nn.Conv2d(channel1,channel2*4,kernel_size=1)

    def forward(self,x):
        x = self.bn1(x)
        out = self.conv1(x)
        out += self.conv2(x) 
        out = F.relu(out)
        return out

# block2: pre-activation bottleneck with an identity shortcut (input and output channels match)
class block2(nn.Module):
    def __init__ (self,channels,stride = 1):
        super(block2,self).__init__()
        channel1,channel2 = channels
        self.conv1 = nn.Sequential(
            nn.BatchNorm2d(channel1),
            nn.ReLU(),
            nn.Conv2d(channel1,channel2,kernel_size=1),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.ZeroPad2d(1),
            nn.Conv2d(channel2,channel2,kernel_size=3),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.Conv2d(channel2,channel2*4,kernel_size=1)
        )

    def forward(self,x):
        out = self.conv1(x)
        out += x 
        out = F.relu(out)
        return out
    
# block3: downsampling pre-activation bottleneck; the 3x3 conv uses stride 2 and the shortcut is a stride-2 max pooling
class block3(nn.Module):
    def __init__ (self,channels,stride = 1):
        super(block3,self).__init__()
        channel1,channel2 = channels
        self.conv1 = nn.Sequential(
            nn.BatchNorm2d(channel1),
            nn.ReLU(),
            nn.Conv2d(channel1,channel2,kernel_size=1),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.ZeroPad2d(1),
            nn.Conv2d(channel2,channel2,kernel_size=3,stride=2),
            nn.BatchNorm2d(channel2),
            nn.ReLU(),
            nn.Conv2d(channel2,channel2*4,kernel_size=1)
        )
        self.maxpool = nn.MaxPool2d(kernel_size=1,stride=2)

    def forward(self,x):
        out = self.conv1(x)
        out += self.maxpool(x)
        out = F.relu(out)
        return out
    
# ResNet50V2: stem + four stages of (3, 4, 6, 3) bottleneck blocks + BN/ReLU head and classifier
class ResNet50V2(nn.Module):
    def __init__(self,num_classes=1000):
        super(ResNet50V2,self).__init__()
        self.initial = nn.Sequential(
            nn.ZeroPad2d(3),
            nn.Conv2d(3,64,kernel_size=7,stride=2),
            nn.ZeroPad2d(1),
            nn.MaxPool2d(kernel_size=3,stride=2)
        )

        self.block1 = block1([64,64])
        self.block2 = block2([256,64])
        self.block3 = block3([256,64])

        self.block4 = block1([256,128])
        self.block5 = block2([512,128])
        self.block6 = block2([512,128])
        self.block7 = block3([512,128])

        self.block8 = block1([512,256])
        self.block9 = block2([1024,256])
        self.block10 = block2([1024,256])
        self.block11 = block2([1024,256])
        self.block12 = block2([1024,256])
        self.block13 = block3([1024,256])

        self.block14 = block1([1024,512])
        self.block15 = block2([2048,512])
        self.block16 = block2([2048,512])

        self.final = nn.Sequential(
            nn.BatchNorm2d(2048),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.fc = nn.Linear(2048,num_classes)
                        
        for m in self.modules():
            if isinstance(m,nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m,nn.BatchNorm2d):
                nn.init.constant_(m.bias,0)
                nn.init.constant_(m.weight,1)
            elif isinstance(m,nn.Linear):
                nn.init.constant_(m.bias,0)

    def forward(self,x):
        x = self.initial(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)

        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)

        x = self.final(x)
        x = x.view(x.size(0),-1)
        x = self.fc(x)

        return x
model = ResNet50V2(num_classes=len(classeNames)).to(device)
model    

Compared with the original ResNet, ResNet50V2 adds a block type whose skip connection is a max-pooling layer (used for downsampling), and the network is built by stacking these three kinds of blocks. Most importantly, ResNet50V2 moves batch normalization and the activation in front of the convolutions (pre-activation) instead of after them.
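To make the pre-activation point concrete, here is a schematic comparison of the layer ordering in a v1 bottleneck versus a v2 bottleneck. The v1_unit/v2_unit helpers are illustrative only and are not part of the model above:

import torch.nn as nn

def v1_unit(c_in, c_mid):
    # ResNet v1 ordering: Conv -> BN -> ReLU, with another ReLU applied after the residual addition
    return nn.Sequential(
        nn.Conv2d(c_in, c_mid, 1), nn.BatchNorm2d(c_mid), nn.ReLU(),
        nn.Conv2d(c_mid, c_mid, 3, padding=1), nn.BatchNorm2d(c_mid), nn.ReLU(),
        nn.Conv2d(c_mid, c_mid * 4, 1), nn.BatchNorm2d(c_mid * 4),
    )   # forward: F.relu(v1_unit(x) + shortcut(x))

def v2_unit(c_in, c_mid):
    # ResNet v2 ordering: BN -> ReLU -> Conv ("pre-activation"); the residual sum is left un-activated
    return nn.Sequential(
        nn.BatchNorm2d(c_in), nn.ReLU(), nn.Conv2d(c_in, c_mid, 1),
        nn.BatchNorm2d(c_mid), nn.ReLU(), nn.Conv2d(c_mid, c_mid, 3, padding=1),
        nn.BatchNorm2d(c_mid), nn.ReLU(), nn.Conv2d(c_mid, c_mid * 4, 1),
    )   # forward: v2_unit(x) + shortcut(x)

Note that in the strict v2 design the residual sum is not passed through a ReLU; the blocks implemented above keep an F.relu after the addition, which is a small deviation from the paper.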

2. Model parameter statistics

# Report the model's parameter count and other per-layer statistics
import torchsummary as summary
summary.summary(model, (3, 224, 224))
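If torchsummary is not installed, a rough parameter count can also be obtained directly from the model (a quick check, not part of the original post):

total_params     = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Total parameters: {total_params:,}  Trainable: {trainable_params:,}')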

# III. Training the Model

1. Build the training function

def train(dataloader,model,optimizer,loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)

    train_acc,train_loss = 0,0

    for X,y in dataloader:
        X,y = X.to(device),y.to(device)

        pred = model(X)
        loss = loss_fn(pred,y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()

    train_loss /= num_batches
    train_acc /= size

    return train_acc,train_loss

2. Build the test function

def test (dataloader, model, loss_fn):
    size        = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)          # number of batches (size / batch_size, rounded up)
    test_loss, test_acc = 0, 0
    
    # During evaluation we stop tracking gradients, which saves memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            
            # compute the loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred, target)
            
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

3. Loss function, optimizer, and learning rate

loss_fn = nn.CrossEntropyLoss()
learn_rate = 1e-3
opt = torch.optim.SGD(model.parameters(),lr=learn_rate)
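The post keeps the learning rate fixed at 1e-3 for all epochs. If you want it to decay during training (not done in the original), one simple option is a StepLR scheduler, stepped once per epoch at the end of the training loop:

# Assumed addition: halve the learning rate every 10 epochs
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
# inside the epoch loop, after the epoch's optimizer updates:
#     scheduler.step()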

4. Training

import copy 

epochs = 20

train_loss=[]
train_acc=[]
test_loss=[]
test_acc=[]
best_acc = 0

for epoch in range(epochs):

    model.train()
    epoch_train_acc,epoch_train_loss = train(train_dl,model,opt,loss_fn)

    model.eval()
    epoch_test_acc,epoch_test_loss = test(test_dl,model,loss_fn)

    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    lr = opt.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, 
                          epoch_test_acc*100, epoch_test_loss, lr))

# Save the best model weights to disk
PATH = 'F:/365data/J2best_model.pth'  # file name for the saved weights
torch.save(best_model.state_dict(), PATH)

print('Done')
Epoch: 1, Train_acc:41.6%, Train_loss:1.387, Test_acc:28.3%, Test_loss:144.182, Lr:1.00E-03
Epoch: 2, Train_acc:60.8%, Train_loss:0.884, Test_acc:25.7%, Test_loss:8.248, Lr:1.00E-03
Epoch: 3, Train_acc:78.3%, Train_loss:0.615, Test_acc:44.2%, Test_loss:2.175, Lr:1.00E-03
Epoch: 4, Train_acc:82.7%, Train_loss:0.444, Test_acc:76.1%, Test_loss:0.658, Lr:1.00E-03
Epoch: 5, Train_acc:88.9%, Train_loss:0.322, Test_acc:53.1%, Test_loss:2.459, Lr:1.00E-03
Epoch: 6, Train_acc:89.4%, Train_loss:0.316, Test_acc:54.0%, Test_loss:1.640, Lr:1.00E-03
Epoch: 7, Train_acc:90.5%, Train_loss:0.268, Test_acc:55.8%, Test_loss:2.404, Lr:1.00E-03
Epoch: 8, Train_acc:90.9%, Train_loss:0.308, Test_acc:80.5%, Test_loss:0.575, Lr:1.00E-03
Epoch: 9, Train_acc:88.5%, Train_loss:0.294, Test_acc:61.1%, Test_loss:1.889, Lr:1.00E-03
Epoch:10, Train_acc:93.6%, Train_loss:0.192, Test_acc:77.9%, Test_loss:0.657, Lr:1.00E-03
Epoch:11, Train_acc:95.4%, Train_loss:0.127, Test_acc:83.2%, Test_loss:0.851, Lr:1.00E-03
Epoch:12, Train_acc:95.8%, Train_loss:0.159, Test_acc:82.3%, Test_loss:0.592, Lr:1.00E-03
Epoch:13, Train_acc:86.5%, Train_loss:0.336, Test_acc:61.9%, Test_loss:1.431, Lr:1.00E-03
Epoch:14, Train_acc:88.9%, Train_loss:0.517, Test_acc:75.2%, Test_loss:1.921, Lr:1.00E-03
Epoch:15, Train_acc:86.3%, Train_loss:0.402, Test_acc:62.8%, Test_loss:1.294, Lr:1.00E-03
Epoch:16, Train_acc:92.3%, Train_loss:0.190, Test_acc:77.9%, Test_loss:0.708, Lr:1.00E-03
Epoch:17, Train_acc:97.3%, Train_loss:0.166, Test_acc:85.0%, Test_loss:0.396, Lr:1.00E-03
Epoch:18, Train_acc:94.9%, Train_loss:0.190, Test_acc:72.6%, Test_loss:0.778, Lr:1.00E-03
Epoch:19, Train_acc:95.4%, Train_loss:0.152, Test_acc:72.6%, Test_loss:1.330, Lr:1.00E-03
Epoch:20, Train_acc:97.8%, Train_loss:0.087, Test_acc:85.0%, Test_loss:0.426, Lr:1.00E-03
Done
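After training, the saved weights can be loaded back into a fresh model for evaluation or inference (a minimal sketch, assuming the same PATH and class list as above):

best = ResNet50V2(num_classes=len(classeNames)).to(device)
best.load_state_dict(torch.load(PATH, map_location=device))
best.eval()
print(test(test_dl, best, loss_fn))  # returns (test_acc, test_loss) for the restored weights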

import matplotlib.pyplot as plt
# Suppress warnings
import warnings
warnings.filterwarnings("ignore")               # ignore warning messages
plt.rcParams['font.sans-serif']    = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False      # render minus signs correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

[Figure: training and validation accuracy/loss curves]
