P3:Pytorch实现天气识别

>- **🍨 本文为[🔗365天深度学习训练营](https://mp.weixin.qq.com/s/0dvHCaOoFnW8SCp3JpzKxg) 中的学习记录博客**
>- **🍖 原作者:[K同学啊](https://mtyjkh.blog.csdn.net/)**

🏡我的环境:

  • 语言环境: Python 3.11.7
  • 开发工具(IDE):PyCharm
  • 深度学习环境:
    • torch==1.12.1+cu113
    • torchvision==0.13.1+cu113
  • 一、 前期准备

    1. 设置GPU

# Core PyTorch + torchvision imports for data loading, transforms and training
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import ImageFolder
from torch.nn.functional import relu  # functional ReLU used in the model's forward pass

# Run on the GPU when CUDA is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    2. 导入数据

data_dir = r'data'  # dataset root; ImageFolder presumably expects one subfolder per weather class — TODO confirm layout

    3. 数据预处理

# Image preprocessing pipeline: resize every image to 224x224, convert to a
# float tensor, and normalize with the standard ImageNet channel statistics
transform = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

    4. 数据集处理

# Build the dataset; class labels come from the subfolder names under data_dir
total_data = ImageFolder(data_dir, transform=transform)

# Random 80/20 train/test split
train_size = int(0.8 * len(total_data))
test_size = len(total_data) - train_size
train_dataset, test_dataset = random_split(total_data, [train_size, test_size])

# Mini-batch loaders; only the training data is shuffled each epoch
batch_size = 32
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    二、构建简单的CNN网络

  • # 定义模型
    class Network_bn(nn.Module):
        def __init__(self, num_classes):
            super(Network_bn, self).__init__()
            self.conv1 = nn.Conv2d(3, 12, kernel_size=5, stride=1, padding=0)
            self.bn1 = nn.BatchNorm2d(12)
            self.conv2 = nn.Conv2d(12, 12, kernel_size=5, stride=1, padding=0)
            self.bn2 = nn.BatchNorm2d(12)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(12, 24, kernel_size=5, stride=1, padding=0)
            self.bn4 = nn.BatchNorm2d(24)
            self.conv5 = nn.Conv2d(24, 24, kernel_size=5, stride=1, padding=0)
            self.bn5 = nn.BatchNorm2d(24)
            self.pool2 = nn.MaxPool2d(2, 2)
            # 使用 AdaptiveAvgPool2d 调整全连接层的输入大小
            self.adaptive_pool = nn.AdaptiveAvgPool2d((6, 6))
            self.fc1 = nn.Linear(24 * 6 * 6, 256)
            self.fc2 = nn.Linear(256, num_classes)
    
        def forward(self, x):
            x = relu(self.bn1(self.conv1(x)))
            x = relu(self.bn2(self.conv2(x)))
            x = self.pool1(x)
            x = relu(self.bn4(self.conv4(x)))
            x = relu(self.bn5(self.conv5(x)))
            x = self.pool2(x)
            x = self.adaptive_pool(x)
            x = x.view(x.size(0), -1)  # 展平特征图,为全连接层准备
            x = relu(self.fc1(x))
            x = self.fc2(x)
            return x  # 确保返回最后一个计算的结果
    
# Instantiate the model with one output unit per weather class
num_classes = len(total_data.classes)  # number of subfolders = number of classes
model = Network_bn(num_classes).to(device)

# Quick sanity check: print the layer structure and the target device
print(model)
print(f"Model will be running on {device}")

    三、 训练模型

    1. 损失函数和优化器

criterion = nn.CrossEntropyLoss()  # cross-entropy over raw logits (applies log-softmax internally)
optimizer = optim.SGD(model.parameters(), lr=1e-4)  # plain SGD with a small fixed learning rate

    2. 训练函数

  • def train(model, device, train_loader, optimizer, criterion):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)  # 获取模型输出
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
        train_loss = running_loss / len(train_loader.dataset)
        train_acc = correct / total
        return train_loss, train_acc

    3. 编写测试函数

  • def test(model, device, test_loader, criterion):
        model.eval()
        running_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(test_loader):
                data, target = data.to(device), target.to(device)
                output = model(data)  # 获取模型输出
                loss = criterion(output, target)
                running_loss += loss.item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()
        test_loss = running_loss / len(test_loader.dataset)
        test_acc = correct / total
        return test_loss, test_acc        

    4. 正式训练

num_epochs = 20
# Per-epoch history, consumed by the plotting section below
train_losses = []
train_accs = []
test_losses = []
test_accs = []

for epoch in range(num_epochs):
    print(f"Epoch {epoch+1}/{num_epochs}")
    # One full pass over the training data, then evaluate on the held-out split
    train_loss, train_acc = train(model, device, train_loader, optimizer, criterion)
    test_loss, test_acc = test(model, device, test_loader, criterion)
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    test_losses.append(test_loss)
    test_accs.append(test_acc)
    print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}")
    print(f"Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}")

    四、 结果可视化

# Visualization: plot training/test accuracy and loss curves side by side
import matplotlib.pyplot as plt

plt.figure(figsize=(12, 4))

# Accuracy curves
plt.subplot(1, 2, 1)
plt.plot(train_accs, label='Train Acc')
plt.plot(test_accs, label='Test Acc')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

# Loss curves
plt.subplot(1, 2, 2)
plt.plot(train_losses, label='Train Loss')
plt.plot(test_losses, label='Test Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.tight_layout()
plt.show()

    通过本次练习,对深度学习的完整训练流程(数据准备、建模、训练与评估)以及CNN网络的结构有了更深入的理解。

  • 10
    点赞
  • 19
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值