Cat and Dog Breed Classification with ResNet34 (PyTorch)

Data source: fast.ai v3 Lesson 1: Your Pets | Kaggle

1. Loading the Data

import re

import torch
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, Subset, random_split  
from pathlib import Path
# Set the parameters
bs = 8
path_img = Path(r"F:\pycharm_recommend_MachineVison\Kitty&Puppy\MachineVisionImage\images2")

# Define the image transforms
transform = transforms.Compose([
    transforms.Resize((224, 224)),       # resize to the 224x224 input the network expects
    transforms.RandomHorizontalFlip(),   # random horizontal flip (data augmentation); note that because the
                                         # train/val split below happens after the dataset is built, this flip
                                         # also hits the validation images (see the sketch at the end of this section)
    transforms.ToTensor(),               # convert the PIL image to a float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet statistics
])




# Build the dataset
dataset = ImageFolder(path_img, transform=transform)

# Total number of samples
dataset_size = len(dataset)

# 80/20 split between training data and held-out data
train_size = int(0.8 * dataset_size)
val_test_size = dataset_size - train_size

# Split the dataset with random_split
train_dataset, val_test_dataset = random_split(dataset, [train_size, val_test_size])

# Create the data loaders
# (on Windows, num_workers > 0 generally requires the training code to run under
#  an `if __name__ == "__main__":` guard)
data_loader = DataLoader(train_dataset, batch_size=bs, shuffle=True, num_workers=4)
val_loader = DataLoader(val_test_dataset, batch_size=bs, shuffle=False, num_workers=4)

# Save the class names so they can be restored at inference time
import json
with open("class_names.json", "w") as f:
    json.dump(dataset.classes, f)
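
A caveat about the split above: because RandomHorizontalFlip is attached to the single shared ImageFolder, the validation images are flipped at random as well. A minimal sketch of one way around this (my own variation, assuming the same directory layout) builds two ImageFolder views with different transforms and indexes them with the same split indices:

import numpy as np
from torch.utils.data import Subset

# Augmented view for training, clean view for validation
train_tf = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
val_tf = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

train_view = ImageFolder(path_img, transform=train_tf)
val_view = ImageFolder(path_img, transform=val_tf)

# The same random 80/20 split, applied to both views
indices = np.random.permutation(len(train_view))
split = int(0.8 * len(train_view))
train_dataset = Subset(train_view, indices[:split].tolist())
val_test_dataset = Subset(val_view, indices[split:].tolist())

The DataLoader calls above would then be built from these two subsets instead.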

2. Defining the Network

import torch
import torch.nn as nn

class BasicBlock(nn.Module):  # residual block
    expansion = 1  # a basic block does not expand the channel count

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # bias controls whether the convolution has a learnable bias term; since BatchNorm
        # (self.bn1 below) immediately re-centres the activations, that bias would be
        # redundant, so bias=False is used here.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)  # batch normalization
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # shortcut (identity) connection
        self.shortcut = nn.Sequential()
        # If the stride is not 1, or the input channels do not match the output channels
        # (after the expansion factor), build a projection shortcut: a 1x1 convolution
        # (to adjust channels/stride) followed by batch normalization.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
    # forward pass
    def forward(self, x):
        out = torch.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = torch.relu(out)
        return out
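
# Quick shape check (illustrative addition, not in the original script): a stride-2
# BasicBlock halves the spatial resolution while the 1x1 projection shortcut raises
# the channel count to `planes`.
_blk = BasicBlock(64, 128, stride=2)
assert _blk(torch.randn(1, 64, 56, 56)).shape == (1, 128, 28, 28)
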
class ResNet(nn.Module):
    # num_blocks: a list giving the number of residual blocks in each stage
    # (layer1..layer4); for ResNet34 below this is [3, 4, 6, 3].
    def __init__(self, block, num_blocks, num_classes=5):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
    # build a stage consisting of a sequence of residual blocks
    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    # forward pass
    def forward(self, x):
        x = torch.relu(self.bn1(self.conv1(x)))
        x = torch.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

def ResNet34(num_classes):
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
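
Before wiring the model into training, a quick sanity check helps confirm the output shape. This is just an illustrative check of my own; the 37 classes assumed here match the Oxford-IIIT Pet breeds, and in practice len(dataset.classes) is used (as in the next section):

# Dummy forward pass: one logit vector per sample is expected
net = ResNet34(num_classes=37)
out = net(torch.randn(8, 3, 224, 224))
print(out.shape)                                  # torch.Size([8, 37])
print(sum(p.numel() for p in net.parameters()))   # roughly 21 million parameters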

3. Instantiating the Model and Using the GPU

# Instantiate the custom ResNet34 model
num_classes = len(dataset.classes)
model = ResNet34(num_classes)

# Use GPU acceleration if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
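
Training this ResNet34 from scratch on a few thousand pet images converges slowly; the fast.ai lesson this data comes from instead starts from ImageNet-pretrained weights. The sketch below shows that optional alternative with torchvision's predefined resnet34; it is not used in the rest of this post, and on older torchvision versions the weights argument may need to be pretrained=True instead:

# Optional alternative (not used below): start from ImageNet-pretrained weights and
# replace the final fully connected layer so it matches our number of classes.
import torchvision.models as models

pretrained = models.resnet34(weights=models.ResNet34_Weights.IMAGENET1K_V1)
pretrained.fc = nn.Linear(pretrained.fc.in_features, num_classes)
pretrained = pretrained.to(device)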

4. Training

import torch.optim as optim
from torch.optim import lr_scheduler

# Define the loss function, optimizer and learning-rate schedule
criterion = nn.CrossEntropyLoss()                                   # cross-entropy loss
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)   # SGD with momentum
# StepLR multiplies the learning rate by gamma every step_size epochs:
# 1e-3 for epochs 0-6, 1e-4 for 7-13, 1e-5 for 14-20, 1e-6 afterwards.
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

# Train the model
num_epochs = 25

for epoch in range(num_epochs):
    print(f'Epoch {epoch}/{num_epochs - 1}')
    print('-' * 10)

    # each epoch has a training phase and a validation phase
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()   # training mode
            loader = data_loader
        else:
            model.eval()    # evaluation mode
            loader = val_loader

        # accumulators for the loss and the number of correct predictions
        running_loss = 0.0
        running_corrects = 0

        # iterate over the data for the current phase
        for inputs, labels in loader:
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass
            with torch.set_grad_enabled(phase == 'train'):  # track gradients only during training
                outputs = model(inputs)            # model predictions for the batch
                _, preds = torch.max(outputs, 1)   # index of the largest logit = predicted class
                loss = criterion(outputs, labels)  # loss between predictions and ground-truth labels

                # backward pass and parameter update only in the training phase
                if phase == 'train':
                    loss.backward()   # compute gradients (backpropagation)
                    optimizer.step()  # update the model parameters

            # accumulate loss and correct predictions
            running_loss += loss.item() * inputs.size(0)           # multiply by the batch size because the loss is a per-sample average
            running_corrects += torch.sum(preds == labels.data)    # count of correct predictions in this batch

        if phase == 'train':
            scheduler.step()  # step the learning-rate scheduler once per training epoch

        # epoch-level loss and accuracy for the current phase
        epoch_loss = running_loss / len(loader.dataset)
        epoch_acc = running_corrects.double() / len(loader.dataset)

        print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

    print()

print('Training complete')
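
The loop above only prints the metrics, so whatever the last epoch produces is what gets saved later. One optional pattern, sketched here as my own addition rather than part of the original code, is to keep a copy of the weights from the best validation epoch and restore them once training finishes:

import copy

# Snapshot holder for the best validation weights
best_acc = 0.0
best_wts = copy.deepcopy(model.state_dict())

def remember_if_best(epoch_acc):
    """Call at the end of each 'val' phase; keeps a copy of the best weights so far."""
    global best_acc, best_wts
    if epoch_acc > best_acc:
        best_acc = float(epoch_acc)
        best_wts = copy.deepcopy(model.state_dict())

# After training: model.load_state_dict(best_wts)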

5. Evaluating the Model and Saving It

from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np

# Collect the model's predictions on the validation set
model.eval()
all_preds = []
all_labels = []

with torch.no_grad():  # gradients are not needed for evaluation
    for inputs, labels in val_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = model(inputs)           # model predictions for the batch
        _, preds = torch.max(outputs, 1)  # predicted class indices

        # store every batch's predictions and ground-truth labels
        all_preds.append(preds.cpu().numpy())
        all_labels.append(labels.cpu().numpy())

# merge the per-batch arrays into single NumPy arrays
all_preds = np.concatenate(all_preds)
all_labels = np.concatenate(all_labels)

# Overall validation accuracy from the collected predictions
val_acc = (all_preds == all_labels).mean()
print(f'Validation Acc: {val_acc:.4f}')

# Compute the confusion matrix
cm = confusion_matrix(all_labels, all_preds)

# Visualize the confusion matrix
plt.figure(figsize=(12, 12))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=dataset.classes, yticklabels=dataset.classes)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()

# Print the class pairs that are most often confused (off-diagonal counts greater than 1)
most_confused = [(dataset.classes[i], dataset.classes[j], cm[i, j]) for i in range(len(dataset.classes)) for j in range(len(dataset.classes)) if cm[i, j] > 1 and i != j]
print(most_confused)

# Save only the learned weights (state_dict); the architecture is rebuilt when loading
torch.save(model.state_dict(), r"F:\pycharm_recommend_MachineVison\Kitty&Puppy\机器视觉(负责分类)\期末项目\普通方式\model\model1.pth")
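
Beyond the confusion matrix, the same prediction arrays can feed scikit-learn's classification_report for per-class precision, recall and F1. A short optional sketch (not in the original post), assuming every class appears at least once in the validation split:

from sklearn.metrics import classification_report

# Per-class precision / recall / F1 on the validation set
print(classification_report(all_labels, all_preds, target_names=dataset.classes))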

6. Loading and Using the Model

import torch
import torchvision.transforms as transforms
from PIL import Image
from pathlib import Path

import json

# Restore the class names saved during training
with open("class_names.json", "r") as f:
    class_names = json.load(f)

# Move computation to the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Training saved only the state_dict, so the ResNet34 architecture (defined above)
# must be rebuilt before the weights can be loaded into it.
model_path = r'F:\pycharm_recommend_MachineVison\Kitty&Puppy\机器视觉(负责分类)\期末项目\普通方式\model\model1.pth'
model = ResNet34(num_classes=len(class_names))
model.load_state_dict(torch.load(model_path, map_location=device))
model = model.to(device)
model.eval()


# Define the image transform for inference (no random augmentation at test time)
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # resize to the network's input size
    transforms.ToTensor(),          # convert to a float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet statistics
])


# Note: rebuilding the ImageFolder dataset is not needed here; the class names
# were saved to class_names.json during training and restored above.

def predict_image(image_path):
    image = Image.open(image_path).convert('RGB')
    image = transform(image).unsqueeze(0)  # Add batch dimension
    image = image.to(device)
    
    with torch.no_grad():
        outputs = model(image)
        # probabilities = torch.nn.functional.softmax(outputs, dim=1)
        _, predicted = torch.max(outputs, 1)
        class_index = predicted.item()
    
    return class_names[class_index]

# Example prediction
image_path = r'F:\pycharm_recommend_MachineVison\Kitty&Puppy\MachineVisionImage\images2\havanese\havanese_2.jpg'
predicted_class = predict_image(image_path)
print(f'The predicted class is: {predicted_class}')
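
The commented-out softmax line above hints at class probabilities. As a small optional extension (my own sketch, not part of the original post), the logits can be turned into a ranked top-3 list, which helps spot near-misses between similar-looking breeds:

# Top-3 predictions with softmax probabilities for a single image
def predict_topk(image_path, k=3):
    image = Image.open(image_path).convert('RGB')
    image = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        probs = torch.nn.functional.softmax(model(image), dim=1)[0]
        top_p, top_i = torch.topk(probs, k)
    return [(class_names[int(i)], float(p)) for p, i in zip(top_p, top_i)]

print(predict_topk(image_path))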
