PyTorch in Practice | Monkeypox Recognition

Reference:
PyTorch in Practice | Day 4: Monkeypox Recognition
Dataset:

https://www.aliyundrive.com/s/dGsVQS8G7Vf
Extraction code: 8nq9

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import matplotlib.pyplot as plt
import os, PIL, pathlib

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Get the class (label) names from the sub-folder names under data/
data_dir = 'data/'
data_dir = pathlib.Path(data_dir)
data_paths = list(data_dir.glob('*'))
classeNames = [path.name for path in data_paths]  # path.name works on both Windows and Linux

# Load the data (the variable is named data_transform so it does not shadow the transforms module)
data_transform = transforms.Compose([transforms.ToTensor(),          # PIL image -> float tensor in [0, 1]
                                     transforms.Resize([224, 224]),  # resize to the input size the model expects
                                     transforms.Normalize(mean=[0.5], std=[0.5])])
data = datasets.ImageFolder(root=data_dir, transform=data_transform)
classes = list(data.class_to_idx)  # class names in index order
# Split into training and test sets (80% / 20%)
from torch.utils.data import random_split

data_train, data_test = random_split(data, lengths=[int(0.8 * len(data)), len(data) - int(0.8 * len(data))],
                                     generator=torch.Generator().manual_seed(0))
# Wrap the datasets in DataLoaders
batch_size = 16
data_loader_train = torch.utils.data.DataLoader(dataset=data_train, batch_size=batch_size, shuffle=True)
data_loader_test = torch.utils.data.DataLoader(dataset=data_test, batch_size=batch_size, shuffle=False)  # no need to shuffle the test set
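
Before building the model, it can help to pull one batch and confirm that the shapes and class names look right. This is a quick sanity-check sketch added here, not part of the original post; it assumes the data/ folder contains one subfolder per class, and the imgs/labels names are only illustrative.

# Sanity check: one training batch should be [batch_size, 3, 224, 224] with integer labels
imgs, labels = next(iter(data_loader_train))
print(imgs.shape, labels.shape)  # e.g. torch.Size([16, 3, 224, 224]) torch.Size([16])
print(classes)                   # class names discovered by ImageFolder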


# Build the model
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5),
            torch.nn.BatchNorm2d(12),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5),
            torch.nn.BatchNorm2d(12),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2),
            torch.nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5),
            torch.nn.BatchNorm2d(24),
            torch.nn.ReLU(),
            torch.nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5),
            torch.nn.BatchNorm2d(24),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2, 2)
        )
        # After two blocks of (5x5 conv, 5x5 conv, 2x2 max-pool), a 224x224 input becomes 24 x 50 x 50
        self.fc = torch.nn.Linear(50 * 50 * 24, len(classeNames))
    # Forward pass
    def forward(self, x):
        x = self.conv1(x)
        x = x.view(x.size(0), -1)  # flatten to [batch, 50 * 50 * 24]
        x = self.fc(x)
        return x
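
Because the fully connected layer hard-codes the 50 * 50 * 24 flatten size, a dummy forward pass is a cheap way to verify the arithmetic. This is a small check added here, not part of the original post; the throwaway _m instance exists only for the check.

# Shape check: a 224x224 input should leave the conv stack as [1, 24, 50, 50]
# and leave the full model as [1, len(classeNames)]
_m = Model()
print(_m.conv1(torch.zeros(1, 3, 224, 224)).shape)  # expected: torch.Size([1, 24, 50, 50])
print(_m(torch.zeros(1, 3, 224, 224)).shape)        # expected: torch.Size([1, len(classeNames)])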


from PIL import Image

# Predict a single image
def predict_one_image(image_path, model, transform, classes):
    test_img = Image.open(image_path).convert('RGB')
    plt.imshow(test_img)  # show the image being predicted

    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)  # add a batch dimension: [1, 3, 224, 224]

    model.eval()
    with torch.no_grad():
        output = model(img)

    _, pred = torch.max(output, 1)
    pred_class = classes[pred.item()]  # .item() turns the index tensor into a Python int
    print(f'Prediction: {pred_class}')

# Model training and testing

model = Model()
model = model.to(device)
# The full training loop is commented out here; uncomment it to train from scratch,
# or load the saved weights further below.
# epochs = 100
# optimizer = torch.optim.Adam(model.parameters())
# cost = torch.nn.CrossEntropyLoss()
#
# train_loss = []
# train_acc = []
# test_loss = []
# test_acc = []
#
# for i in range(epochs):
#     running_loss = 0.0
#     running_correct = 0
#     testing_correct = 0
#     testing_loss = 0.0
#     print("Epoch {}/{}".format(i + 1, epochs))
#     print("-" * 20)
#     size = len(data_train)  # number of training samples
#     num_batches = len(data_loader_train)  # number of training batches
#
#     model.train()
#     for x_train, y_train in data_loader_train:
#         x_train, y_train = x_train.to(device), y_train.to(device)
#         outputs = model(x_train)
#         optimizer.zero_grad()
#         _, pred = torch.max(outputs, 1)
#         loss = cost(outputs, y_train)
#         loss.backward()
#         optimizer.step()
#         running_loss += loss.item()
#         running_correct += (pred == y_train).sum().item()
#     train_acc.append(running_correct / size)
#     train_loss.append(running_loss / num_batches)
#
#     size = len(data_test)  # number of test samples
#     num_batches = len(data_loader_test)  # number of test batches
#
#     model.eval()
#     with torch.no_grad():
#         for x_test, y_test in data_loader_test:
#             x_test, y_test = x_test.to(device), y_test.to(device)
#             outputs = model(x_test)
#             _, pred = torch.max(outputs, 1)
#             loss = cost(outputs, y_test)
#             testing_loss += loss.item()
#             testing_correct += (pred == y_test).sum().item()
#
#     print("Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy is:{:.4f}%".format(
#         running_loss / len(data_loader_train),
#         100 * running_correct / len(data_train),
#         100 * testing_correct / len(data_test)))
#     test_acc.append(testing_correct / size)
#     test_loss.append(testing_loss / num_batches)
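
The loading code below expects a './model.pth' checkpoint, but the post never shows the step that writes it. A one-line sketch that would produce it, assuming the training loop above has been uncommented and run:

# torch.save(model.state_dict(), './model.pth')  # write the trained weights so they can be loaded below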

# PATH = './model.pth'  # file with the saved weights
# model.load_state_dict(torch.load(PATH, map_location=device))
#
# # Predict one image from the training set
# predict_one_image(image_path='./data/Monkeypox/M01_01_00.jpg',
#                   model=model,
#                   transform=data_transform,
#                   classes=classes)

# Loss and Accuracy curves


# Suppress warnings
# import warnings
#
# warnings.filterwarnings("ignore")  # ignore warning messages
# plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
# plt.rcParams['axes.unicode_minus'] = False  # display the minus sign correctly
# plt.rcParams['figure.dpi'] = 100  # figure resolution
#
# epochs_range = range(epochs)
#
# plt.figure(figsize=(12, 3))
# plt.subplot(1, 2, 1)
#
# plt.plot(epochs_range, train_acc, label='Training Accuracy')
# plt.plot(epochs_range, test_acc, label='Test Accuracy')
# plt.legend(loc='lower right')
# plt.title('Training and Validation Accuracy')
#
# plt.subplot(1, 2, 2)
# plt.plot(epochs_range, train_loss, label='Training Loss')
# plt.plot(epochs_range, test_loss, label='Test Loss')
# plt.legend(loc='upper right')
# plt.title('Training and Validation Loss')
# plt.show()

Problems encountered:

[Screenshot of the error traceback]

The cause was calling transforms.ToTensor without parentheses. If you run into an error like this, go back and check whether any of the preceding function calls are missing a pair of parentheses (or have an extra one).
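
For reference, the slip looks like this (a minimal reproduction sketch, not taken from the original code; the bad_transform/good_transform names are only illustrative):

# Wrong: passes the ToTensor class itself instead of an instance; Compose later
# tries to call it on a PIL image, which typically fails with a TypeError
bad_transform = transforms.Compose([transforms.ToTensor, transforms.Resize([224, 224])])

# Right: instantiate every transform with parentheses
good_transform = transforms.Compose([transforms.ToTensor(), transforms.Resize([224, 224])])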
