P3: Implementing Weather Recognition with PyTorch

>- **🍨 This article is a learning-record post for the [🔗365天深度学习训练营](https://mp.weixin.qq.com/s/Z9yL_wt7L8aPOr9Lqb1K3w)**
>- **🍖 Original author: [K同学啊](https://mtyjkh.blog.csdn.net/)**

My environment: Anaconda

Deep learning framework: PyTorch
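
If you want to confirm your environment before running anything, a minimal check like the one below (assuming torch and torchvision are already installed in the Anaconda environment) prints the installed versions and whether CUDA is visible:

```python
import torch
import torchvision

# print the installed versions and whether a CUDA-capable GPU is visible
print("torch:", torch.__version__)
print("torchvision:", torchvision.__version__)
print("CUDA available:", torch.cuda.is_available())
```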

The complete code is given first:

import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
from torchvision import transforms, datasets
import os
import PIL
import pathlib
import random
import matplotlib.pyplot as plt
from PIL import Image

def main():
    # Set the device to GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Define data directories and classes
    data_dir = 'C:/Users/86138/OneDrive/桌面/第5天-没有加密版本/第5天/weather_photos'
    data_dir = pathlib.Path(data_dir)
    data_paths = list(data_dir.glob('*'))
    classeNames = [str(path).split("\\")[-1] for path in data_paths]

    # Display sample images
    image_folder = 'C:/Users/86138/OneDrive/桌面/第5天-没有加密版本/第5天/weather_photos/cloudy'
    image_files = [f for f in os.listdir(image_folder) if f.endswith((".jpg", ".png", ".jpeg"))]
    fig, axes = plt.subplots(3, 8, figsize=(16, 6))

    for ax, img_file in zip(axes.flat, image_files):
        img_path = os.path.join(image_folder, img_file)
        img = Image.open(img_path)
        ax.imshow(img)
        ax.axis('off')

    plt.tight_layout()
    plt.show()

    # Define transformations
    train_transforms = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Load dataset
    total_data = datasets.ImageFolder(data_dir, transform=train_transforms)

    # Split dataset into training and testing sets
    train_size = int(0.8 * len(total_data))
    test_size = len(total_data) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])

    # Define data loaders
    batch_size = 32
    train_dl = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
    test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=1)

    # Display the shape of the first batch
    for X, y in test_dl:
        print("Shape of X [N, C, H, W]: ", X.shape)
        print("Shape of y: ", y.shape, y.dtype)
        break

    # Define the CNN model
    class Network_bn(nn.Module):
        def __init__(self):
            super(Network_bn, self).__init__()
            self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
            self.bn1 = nn.BatchNorm2d(12)
            self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
            self.bn2 = nn.BatchNorm2d(12)
            self.pool1 = nn.MaxPool2d(2, 2)
            self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
            self.bn4 = nn.BatchNorm2d(24)
            self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
            self.bn5 = nn.BatchNorm2d(24)
            self.pool2 = nn.MaxPool2d(2, 2)
            self.fc1 = nn.Linear(24 * 50 * 50, len(classeNames))

        def forward(self, x):
            x = torch.relu(self.bn1(self.conv1(x)))
            x = torch.relu(self.bn2(self.conv2(x)))
            x = self.pool1(x)
            x = torch.relu(self.bn4(self.conv4(x)))
            x = torch.relu(self.bn5(self.conv5(x)))
            x = self.pool2(x)
            x = x.view(-1, 24 * 50 * 50)
            x = self.fc1(x)
            return x

    # Initialize the model, loss function, and optimizer
    model = Network_bn().to(device)
    loss_fn = nn.CrossEntropyLoss()
    learn_rate = 1e-4
    opt = torch.optim.SGD(model.parameters(), lr=learn_rate)

    # Training loop
    def train(dataloader, model, loss_fn, optimizer):
        size = len(dataloader.dataset)
        num_batches = len(dataloader)
        train_loss, train_acc = 0, 0

        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            loss = loss_fn(pred, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
            train_loss += loss.item()

        train_acc /= size
        train_loss /= num_batches
        return train_acc, train_loss

    # Testing loop
    def test(dataloader, model, loss_fn):
        size = len(dataloader.dataset)
        num_batches = len(dataloader)
        test_loss, test_acc = 0, 0

        with torch.no_grad():
            for imgs, target in dataloader:
                imgs, target = imgs.to(device), target.to(device)
                target_pred = model(imgs)
                loss = loss_fn(target_pred, target)

                test_loss += loss.item()
                test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

        test_acc /= size
        test_loss /= num_batches
        return test_acc, test_loss

    # Training and validation
    epochs = 20
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []

    for epoch in range(epochs):
        model.train()
        epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

        model.eval()
        epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

        train_acc.append(epoch_train_acc)
        train_loss.append(epoch_train_loss)
        test_acc.append(epoch_test_acc)
        test_loss.append(epoch_test_loss)

        template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}')
        print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss, epoch_test_acc * 100, epoch_test_loss))

    print('Done')

    # Plotting results
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.rcParams['figure.dpi'] = 100

    epochs_range = range(epochs)

    plt.figure(figsize=(12, 3))
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, train_acc, label='Training Accuracy')
    plt.plot(epochs_range, test_acc, label='Test Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, train_loss, label='Training Loss')
    plt.plot(epochs_range, test_loss, label='Test Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')
    plt.show()

if __name__ == '__main__':
    main()
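
One detail in the code above that is easy to get wrong is the `24 * 50 * 50` input size of `fc1`. Each 5×5 convolution with no padding shrinks the feature map by 4 pixels per side, and each 2×2 max pool halves it, so a 224×224 input ends up as 224 → 220 → 216 → 108 → 104 → 100 → 50. The short sketch below (a standalone check, not part of the training script) reproduces that arithmetic:

```python
def feature_size(size: int) -> int:
    """Trace the spatial size of a square input through Network_bn."""
    size -= 4        # conv1: 5x5 kernel, no padding
    size -= 4        # conv2: 5x5 kernel, no padding
    size //= 2       # pool1: 2x2 max pool
    size -= 4        # conv4: 5x5 kernel, no padding
    size -= 4        # conv5: 5x5 kernel, no padding
    size //= 2       # pool2: 2x2 max pool
    return size

print(feature_size(224))  # 50, so fc1 expects 24 * 50 * 50 input features
```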

1. Configure the GPU

This machine has an NVIDIA graphics card, so training runs on the GPU; as the code below shows, environments without a usable GPU fall back to the CPU automatically.

import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
from torchvision import transforms, datasets

import os,PIL,pathlib,random

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

device
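
To double-check which device was actually selected, the optional lines below (continuing from the block above) print it and, when CUDA is available, the GPU model via `torch.cuda.get_device_name`:

```python
print(device)  # "cuda" or "cpu"
if torch.cuda.is_available():
    # show which GPU will be used
    print(torch.cuda.get_device_name(0))
```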


2. Import the Data

The dataset used in this post is provided for download by the original author.

The code below is explained line by line:

① pathlib.Path() converts the string folder path into a pathlib.Path object;

② glob() collects every path under data_dir and stores them as a list in data_paths;

③ split() is applied to each path in data_paths to extract the class name of each folder, which is stored in classeNames;

④ printing classeNames shows the class name of each folder.

data_dir = './data/'  # change this to your own dataset path
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
classeNames = [str(path).split("\\")[1] for path in data_paths]  # splitting on "\\" assumes Windows path separators
classeNames
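
Note that `split("\\")[1]` relies on Windows-style backslash separators, so the same line breaks on Linux or macOS. A more portable sketch (assuming the same `./data/<class>/` layout) reads the class name from pathlib's `.name` attribute instead:

```python
import pathlib

data_dir = pathlib.Path('./data/')  # same dataset root as above
# the last path component is the class name; this works on any OS
classeNames = [path.name for path in data_dir.glob('*') if path.is_dir()]
print(classeNames)
```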

Next, visualize a few samples:

import matplotlib.pyplot as plt
from PIL import Image

# path to the folder of sample images
image_folder = './data/cloudy/'  # change this to your own path

# collect all image files in the folder
image_files = [f for f in os.listdir(image_folder) if f.endswith((".jpg", ".png", ".jpeg"))]

# create the Matplotlib figure
fig, axes = plt.subplots(3, 8, figsize=(16, 6))

# load and display the images, one per subplot
for ax, img_file in zip(axes.flat, image_files):
    img_path = os.path.join(image_folder, img_file)
    img = Image.open(img_path)
    ax.imshow(img)
    ax.axis('off')

# show the figure
plt.tight_layout()
plt.show()

total_datadir = './data/'  # change this to your own path

# For more on transforms.Compose, see: https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize input images to a uniform size
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor and scale values to [0, 1]
    transforms.Normalize(           # standardize toward a standard normal (Gaussian) distribution so the model converges more easily
        mean=[0.485, 0.456, 0.406], 
        std=[0.229, 0.224, 0.225])  # mean=[0.485,0.456,0.406] and std=[0.229,0.224,0.225] are the standard ImageNet statistics, estimated from random samples of that dataset
])

total_data = datasets.ImageFolder(total_datadir,transform=train_transforms)
total_data
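
The mean/std used above are the ImageNet statistics. If you prefer to estimate them from this weather dataset itself, a rough sketch like the following (a hypothetical helper, not part of the original post) averages the per-channel statistics image by image after `ToTensor()`:

```python
import torch
from torchvision import transforms, datasets

def estimate_mean_std(root):
    """Rough per-channel mean/std, averaged image by image over the dataset."""
    ds = datasets.ImageFolder(root, transform=transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor(),
    ]))
    mean = torch.zeros(3)
    std = torch.zeros(3)
    for img, _ in ds:
        mean += img.mean(dim=(1, 2))  # average over H and W for each channel
        std += img.std(dim=(1, 2))
    return mean / len(ds), std / len(ds)

# mean, std = estimate_mean_std('./data/')
```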

3. Split the Dataset

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_dataset, test_dataset

batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)

for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
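
`random_split` returns `Subset` objects, so the class-to-index mapping still lives on the underlying `ImageFolder`. A quick optional check (continuing from the variables above) confirms the 8:2 split and shows how labels map to folder names:

```python
# sizes of the two subsets and the label mapping used by ImageFolder
print(len(train_dataset), len(test_dataset))
print(total_data.class_to_idx)  # e.g. {'cloudy': 0, ...}
```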
