VGG16微调实操CNN-猫狗分类

前言 introduction

  • 使用Windows10 + Python + Pytorch + VScode + GPU(GTX 1050)
    在这里插入图片描述
  • 承接这篇使用tensorflow做猫狗分类的文章
  • 数据来源于kaggle

结构 structure

  • 导入相关包和模块 package&module
# https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/submissions
# 1. data
# 2. model
# 3. train
# 4. evaluate

import torch    # pytorch
import numpy as np  # 数据处理
import os   # 文件操作

from torch.utils.data import Dataset, DataLoader    # 数据集
import torchvision  # pytorch 图像
import torch.nn as nn   # 神经网络
import torchvision.transforms as transforms # 数据增强
import matplotlib.pyplot as plt # 绘图
from PIL import Image   # 图像处理
  • 数据集的处理 dataset:一般不直接使用array或者list作为数据集,而是通过构造一个dataset类,然后生成datasetloader的一个迭代器,该迭代器抽象了shuffle、batch_size等操作,很方便。
## Setup
# Pick the compute device: use the GPU when CUDA is available, otherwise
# fall back to the CPU. Everything (model + batches) is moved onto it later.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Training hyper-parameters.
num_epoches = 1        # number of passes over the training set
batch_size = 64        # samples per mini-batch
learning_rate = 0.01   # Adam step size

## dataset
# Custom dataset class that handles reading the raw data. To be usable with a
# DataLoader (batching, shuffling, etc.) it must implement the three methods below.
# Intro: https://pytorch.org/tutorials/beginner/basics/data_tutorial.html
# DataLoader in depth: https://pytorch.org/docs/stable/data.html
# __init__() initialises the dataset (call the parent __init__ first)
# __len__() returns the number of samples in the dataset
# __getitem__() returns one sample (features and label) for a given index
class CDDataset(Dataset):
    def __init__(self, dir, mode='train', transforms=None) -> None:
        super(self).__init__()
        
        self.dir = dir
        self.files = os.listdir(dir) # list of files' name
        self.transforms = transforms
        self.mode = mode
        
        self.labels = []
        if mode == "train":
            for img in self.files:
                # dog->1, cat->0
                if "dog" in img:
                    self.labels.append(1)
                else:
                    self.labels.append(0)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        img = Image.open(os.path.join(self.dir, self.files[index])) # Open index_th image
        if self.transforms:
            img = self.transforms(img)
        
        if self.mode == 'train':
            return img, self.labels[index]
        else:
            return img

# Augmentation / preprocessing. mean and std are the per-channel values used
# by Normalize (PIL-image ops must come before ToTensor()).
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.25, 0.25, 0.25])
data_transforms = {
    'train': transforms.Compose([
        # FIX: RandomSizedCrop was deprecated and removed from torchvision;
        # RandomResizedCrop is the current equivalent.
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
}

# Instantiate the dataset from the Kaggle "train" folder.
train_dir = "./train"
test_dir = './test'
train_dataset = CDDataset(dir=train_dir, mode='train', transforms=data_transforms['train'])

# Split into train/test. (Properly this would be train/validation/test, with
# validation used for hyper-parameter tuning; validation is skipped here.)
# The seeded generator makes the split reproducible across runs.
# https://pytorch.org/docs/stable/data.html
# FIX: compute the second length as the remainder so the two lengths always
# sum to len(train_dataset) — two independent int truncations
# (int(0.85*n), int(0.15*n)) can otherwise fall one sample short and make
# random_split raise.
n_train = int(0.85 * len(train_dataset))
train_dataset, test_dataset = torch.utils.data.random_split(
    train_dataset,
    [n_train, len(train_dataset) - n_train],
    generator=torch.Generator().manual_seed(0))

# Build the DataLoaders (this is what requires the three Dataset methods).
# Shuffling matters for training only; the test set is left in order.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
  • 模型的处理 model
## model
# fine tuning pytorch tutorial:https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
class model(nn.Module):
    """VGG16 fine-tuning model.

    The ImageNet-pretrained backbone is frozen; only the final classifier
    layer is replaced with a fresh `num_classes`-way linear head and trained.
    Fine-tuning tutorial:
    https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
    """

    def __init__(self, num_classes=2):
        super(model, self).__init__()
        # Load ImageNet-pretrained VGG16 weights (downloads on first use).
        self.vgg16 = torchvision.models.vgg16(pretrained=True)
        # Freeze every pretrained parameter; only the new head gets gradients.
        for param in self.vgg16.parameters():
            param.requires_grad = False
        in_features = self.vgg16.classifier[6].in_features
        # Replace the final classifier layer (index 6) with a 2-class head.
        self.vgg16.classifier[6] = nn.Linear(in_features=in_features, out_features=num_classes)

    def forward(self, x):
        # BUG FIX: the original wrote `x.to(device)` without assigning the
        # result — Tensor.to returns a new tensor, so that line was a no-op.
        x = x.to(device)
        x = self.vgg16(x)
        # No softmax here: nn.CrossEntropyLoss applies log-softmax internally.
        return x

model = model().to(device)  # important: move the model onto the chosen device
  • 训练 train
## train
criterion = nn.CrossEntropyLoss()  # cross-entropy loss (expects raw logits)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # Adam optimiser

# How many iterations one epoch takes.
n_total_steps = len(train_loader)
print("Begin to train")
for epoch in range(num_epoches):
    for i, (imgs, labels) in enumerate(train_loader):
        # CPU and GPU tensors cannot be mixed in one op; move the batch to
        # the same device as the model first.
        imgs = imgs.to(device)
        labels = labels.to(device)

        # Forward pass.
        outputs = model(imgs)
        loss = criterion(outputs, labels)

        # Backward pass. zero_grad() is mandatory — without it the gradients
        # of every iteration would accumulate.
        optimizer.zero_grad()
        loss.backward()   # compute gradients
        optimizer.step()  # apply the update

        # Periodically report epoch / step / loss.
        if (i + 1) % 100 == 0:
            print(f'Epoch {epoch+1}, Step {i+1}/{n_total_steps}, Loss:{loss.item():.4f}')

print("Finished Training")
  • 评估 evaluation
# FIX: switch to eval mode — the VGG classifier contains Dropout layers,
# which the original left active, making the measured accuracy noisy.
model.eval()
with torch.no_grad():  # no gradients needed for evaluation
    n_correct = 0
    n_samples = 0
    # (The original also allocated per-class counters here but never used
    # them; removed.)

    # Evaluate batch by batch over the held-out test split.
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)

        # torch.max returns (values, indices); the index is the predicted class.
        _, predictions = torch.max(outputs, 1)
        n_samples += labels.shape[0]
        n_correct += (predictions == labels).sum().item()

    acc = 100.0 * n_correct / n_samples
    print(f"accuracy = {acc}")

结果与分析 result&analysis

  • 运行结果
    在这里插入图片描述
  • 模型信息
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
#######
model(
  (vgg16): VGG(
    (features): Sequential(
      (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (1): ReLU(inplace=True)
      (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (3): ReLU(inplace=True)
      (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) 
      (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (6): ReLU(inplace=True)
      (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))        
      (8): ReLU(inplace=True)
      (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) 
      (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (11): ReLU(inplace=True)
      (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (13): ReLU(inplace=True)
      (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (15): ReLU(inplace=True)
      (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
      (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (18): ReLU(inplace=True)
      (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (20): ReLU(inplace=True)
      (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))       
      (22): ReLU(inplace=True)
      (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
      (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (25): ReLU(inplace=True)
      (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (27): ReLU(inplace=True)
      (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (29): ReLU(inplace=True)
      (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    )
    (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
    (classifier): Sequential(
      (0): Linear(in_features=25088, out_features=4096, bias=True)
      (1): ReLU(inplace=True)
      (2): Dropout(p=0.5, inplace=False)
      (3): Linear(in_features=4096, out_features=4096, bias=True)
      (4): ReLU(inplace=True)
      (5): Dropout(p=0.5, inplace=False)
      (6): Linear(in_features=4096, out_features=2, bias=True)
    )
  )
)
# NOTE(review): checkpoint-saving snippet apparently pasted from another
# project — `epochID`, `lossMIN`, `checkpoint_path` and `launchTimestamp` are
# undefined in this file, and nn.CrossEntropyLoss has no `.alpha`/`.gamma`
# attributes (those belong to a focal-loss implementation). As written this
# raises NameError — TODO: wire up these variables or drop the line.
torch.save({'epoch': epochID + 1, 'state_dict': model.state_dict(), 'best_loss': lossMIN,
                            'optimizer': optimizer.state_dict(),'alpha': loss.alpha, 'gamma': loss.gamma},
                           checkpoint_path + '/m-' + launchTimestamp + '-' + str("%.4f" % lossMIN) + '.pth.tar')
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值