Cat vs. Dog Classification Based on Convolutional Neural Networks

I. Learning Tasks

1. Following https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/5.2-using-convnets-with-small-datasets.ipynb, build your own convolutional neural network with TensorFlow and Keras to classify the cats-vs-dogs dataset, annotating the key steps with comments. Explain: What is overfitting? What is data augmentation? How much does accuracy improve when only data augmentation is applied? And what is the practical effect of then adding a dropout layer?
2. Complete the cat/dog classification with a VGG19 network and report the experimental results (a minimal setup sketch is given below).
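
The rest of these notes implement the classifier in PyTorch rather than Keras, and the VGG19 experiment itself is not included. As a starting point for task 2, the following is only a rough sketch, assuming the same data/train folder layout used later in this post and a frozen pretrained backbone from torchvision; it is not a verified experiment and reports no results.

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# VGG19 was trained on 224x224 ImageNet images, so resize and normalize accordingly
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
train_set = datasets.ImageFolder('data/train', transform)   # assumed path
train_loader = DataLoader(train_set, batch_size=20, shuffle=True)

model = models.vgg19(pretrained=True)        # newer torchvision versions use weights=... instead
for p in model.features.parameters():
    p.requires_grad = False                  # freeze the convolutional backbone
model.classifier[6] = nn.Linear(4096, 2)     # replace the 1000-class ImageNet head with 2 classes
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=1e-4)

for epoch in range(5):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()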

II. Learning Content

1. Environment Setup

1. Install Anaconda
2. Install PyTorch via pip (using the Tsinghua mirror):

pip install -i https://pypi.tuna.tsinghua.edu.cn/simple torch
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple torchvision
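
A quick sanity check after installation, just to confirm both packages import and whether a GPU is visible:

import torch
import torchvision

print(torch.__version__, torchvision.__version__)
print('CUDA available:', torch.cuda.is_available())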

2. Build the Dataset

Dataset download: https://www.kaggle.com/lizhensheng/-2000
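torchvision's ImageFolder (used below) infers labels from subfolder names, so after downloading, the images need to be arranged one folder per class. The exact folder names here are an assumption based on the data/train and data/val paths and the cat/dog labels used later in this post:

data/
  train/
    cat/   cat.0.jpg, cat.1.jpg, ...
    dog/   dog.0.jpg, dog.1.jpg, ...
  val/
    cat/
    dog/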

3. Cat/Dog Example

1. Import libraries

# Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets

2. Set hyperparameters

# Hyperparameters
BATCH_SIZE = 20
EPOCHS = 10
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

3. Image Preprocessing and Data Augmentation

# Data preprocessing and augmentation
transform = transforms.Compose([
    transforms.Resize(100),
    transforms.RandomVerticalFlip(),
    transforms.RandomCrop(50),
    transforms.RandomResizedCrop(150),   # final input size is 150x150
    transforms.ColorJitter(brightness=0.5, contrast=0.5, hue=0.5),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
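
The network defined below expects the 150x150 crops this pipeline ends with; applying the transform to any one training image (the path here is just a placeholder) should print torch.Size([3, 150, 150]):

from PIL import Image

sample = transform(Image.open('data/train/cat/cat.0.jpg'))   # placeholder path
print(sample.shape)   # expected: torch.Size([3, 150, 150])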

4. Load and Batch the Data

# Load the datasets
dataset_train = datasets.ImageFolder('data/train', transform)
print(dataset_train.imgs)
# Label assigned to each class folder
print(dataset_train.class_to_idx)
dataset_test = datasets.ImageFolder('data/val', transform)
# Label assigned to each class folder
print(dataset_test.class_to_idx)

# Wrap the datasets in DataLoaders
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=True)
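
Pulling a single batch is a cheap way to confirm the loaders are wired up correctly; with the settings above each batch should contain 20 images of shape 3x150x150 and 20 integer labels (the label mapping is whatever class_to_idx printed above):

images, labels = next(iter(train_loader))
print(images.shape)   # expected: torch.Size([20, 3, 150, 150])
print(labels.shape)   # expected: torch.Size([20])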

5. Define the Network Model

# Define the network
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(4608, 512)   # 4608 = 128 channels * 6 * 6 for a 150x150 input
        self.fc2 = nn.Linear(512, 1)

    def forward(self, x):
        in_size = x.size(0)
        x = self.conv1(x)
        x = F.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.max_pool3(x)
        x = self.conv5(x)
        x = F.relu(x)
        x = self.conv6(x)
        x = F.relu(x)
        x = self.max_pool4(x)
        # Flatten
        x = x.view(in_size, -1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)   # single sigmoid output for binary classification
        return x

modellr = 1e-4
# Instantiate the model and move it to the GPU (if available)
model = ConvNet().to(DEVICE)
# Use the Adam optimizer with a small learning rate
optimizer = optim.Adam(model.parameters(), lr=modellr)
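
The one non-obvious constant above is the 4608 input size of fc1: starting from a 150x150 image, each 3x3 convolution without padding shrinks the feature map by 2 pixels and each pooling halves it, leaving a 6x6 map with 128 channels, and 128 * 6 * 6 = 4608. A throwaway check (not part of the training script) makes this visible; if the dropout experiment from task 1 is attempted, an nn.Dropout(0.5) between fc1 and fc2 would be the natural place to insert it.

# Throwaway check: trace a dummy 150x150 input through the convolutional stack
with torch.no_grad():
    dummy = torch.zeros(1, 3, 150, 150).to(DEVICE)
    f = model.max_pool1(F.relu(model.conv1(dummy)))
    f = model.max_pool2(F.relu(model.conv2(f)))
    f = model.max_pool3(F.relu(model.conv4(F.relu(model.conv3(f)))))
    f = model.max_pool4(F.relu(model.conv6(F.relu(model.conv5(f)))))
    print(f.shape)               # expected: torch.Size([1, 128, 6, 6])
    print(f.view(1, -1).shape)   # expected: torch.Size([1, 4608]) -> matches fc1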

6. Adjust the Learning Rate

def adjust_learning_rate(optimizer, epoch):
    """Decay the learning rate by a factor of 10 every 5 epochs."""
    modellrnew = modellr * (0.1 ** (epoch // 5))
    print("lr:", modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew
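
The same step decay can also be expressed with PyTorch's built-in scheduler; a roughly equivalent sketch (not what this post actually runs) would create the scheduler once and call scheduler.step() at the end of each epoch instead of calling adjust_learning_rate:

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# ... then call scheduler.step() once per epoch in the training loop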

7. Define the Training and Validation Routines

# Training loop for one epoch
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Binary cross-entropy expects float targets of shape [batch, 1]
        data, target = data.to(device), target.to(device).float().unsqueeze(1)
        optimizer.zero_grad()
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), loss.item()))


# Validation loop
def val(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device).float().unsqueeze(1)
            output = model(data)
            # Accumulate the per-batch mean loss
            test_loss += F.binary_cross_entropy(output, target, reduction='mean').item()
            # Threshold the sigmoid output at 0.5 to get 0/1 predictions
            pred = torch.tensor([[1] if num[0] >= 0.5 else [0] for num in output]).to(device)
            correct += pred.eq(target.long()).sum().item()
        # Divide by the number of batches so the printed value is a true average
        test_loss /= len(test_loader)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))

8. Train and Save the Model

# Training loop over all epochs
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, test_loader)

# Save the whole model object (architecture + weights)
torch.save(model, 'model.pth')
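
torch.save(model, ...) pickles the whole model object, which only loads back if the ConvNet class definition is available under the same name. A common alternative (shown only as a sketch; the test script below loads the full object exactly as saved above) is to store just the weights:

# Alternative: save and restore only the parameters (state_dict)
torch.save(model.state_dict(), 'model_weights.pth')

restored = ConvNet().to(DEVICE)
restored.load_state_dict(torch.load('model_weights.pth', map_location=DEVICE))
restored.eval()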

9. Training Results

(screenshot of the per-epoch training log)

4. Test Code

from __future__ import print_function, division

from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms

# Define the network (must match the class used for training, since the
# full model object is loaded below)
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(64, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.max_pool3 = nn.MaxPool2d(2)
        self.conv5 = nn.Conv2d(64, 128, 3)
        self.conv6 = nn.Conv2d(128, 128, 3)
        self.max_pool4 = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(4608, 512)
        self.fc2 = nn.Linear(512, 1)
 
    def forward(self, x):
        in_size = x.size(0)
        x = self.conv1(x)
        x = F.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.max_pool3(x)
        x = self.conv5(x)
        x = F.relu(x)
        x = self.conv6(x)
        x = F.relu(x)
        x = self.max_pool4(x)
        # Flatten
        x = x.view(in_size, -1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = torch.sigmoid(x)
        return x
# Path to the saved model
model_save_path = 'model.pth'
 
# Test-time preprocessing: deterministic resize to the 150x150 size the network
# expects, plus the same normalization as training (no random augmentation at inference)
transform_test = transforms.Compose([
    transforms.Resize((150, 150)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
 
 
class_names = ['cat', 'dog']   # index order follows ImageFolder's alphabetical class ordering

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the full model object saved during training
model = torch.load(model_save_path, map_location=device)
model.to(device)
model.eval()
# print(model)
 
image_PIL = Image.open('dog.12.jpg')
# Preprocess and add a batch dimension (equivalent to torch.unsqueeze(image_tensor, 0))
image_tensor = transform_test(image_PIL)
image_tensor.unsqueeze_(0)
image_tensor = image_tensor.to(device)
with torch.no_grad():
    out = model(image_tensor)
# Threshold the single sigmoid output at 0.5 and map it to a class name
pred = 1 if out[0, 0].item() >= 0.5 else 0
print(class_names[pred])

5. Results

(screenshots of the test image and the predicted class label)

6. Reference

Pytorch自定义模型实现猫狗分类 (Cat vs. dog classification with a custom PyTorch model)
