PyTorch - Weather Image Classification


Project Overview

This article is reposted and adapted from: https://blog.csdn.net/m0_60524373/article/details/127195845


Dataset - Weather Data

Download link:

https://www.kaggle.com/datasets/somesh24/multiclass-images-for-weather-classification


Dataset details:

  • 4 classes: cloudy, rain, shine, sunrise
  • 1125 images in total

[Sample images: rain / shine / sunrise / cloudy]


Code Implementation

import os
import json
from PIL import Image 
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision
from torchvision import transforms
import torchvision.datasets as datasets

import random, pathlib


Data Processing

Inspect the data and classes

data_dir = '/Users/shushu/Documents/nlp_data/weather_photos'
 
data_dir = pathlib.Path(data_dir)
# PosixPath('/Users/shushu/Documents/nlp_data/weather_photos')

# Total number of images: 1122
image_count = len(list(data_dir.glob('*/*.jpg')))

# All image paths
data_paths = list(data_dir.glob('*/*.jpg'))

data_paths[:3]
'''
[PosixPath('/Users/shushu/Documents/nlp_data/weather_photos/cloudy/cloudy126.jpg'),
 PosixPath('/Users/shushu/Documents/nlp_data/weather_photos/cloudy/cloudy132.jpg'),
 PosixPath('/Users/shushu/Documents/nlp_data/weather_photos/cloudy/cloudy23.jpg')]
'''
 
  
str(data_paths[3]).split("/")[-2]  # 'cloudy' -- the class name is the parent directory name


types = set(str(path).split("/")[-2] for path in data_paths)
types  # {'cloudy', 'rain', 'shine', 'sunrise'}
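
As an additional sanity check (not in the original post), you can count how many images each class folder contains by reusing data_paths:

from collections import Counter

# Each image's class is its parent directory name
class_counts = Counter(path.parent.name for path in data_paths)
print(class_counts)  # the four per-class counts should sum to the total image count above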


Preprocess the data
data_transform = transforms.Compose([
    transforms.Resize([224, 224]),  # resize input images to a uniform size
    transforms.ToTensor(),          # convert PIL Image / numpy.ndarray to a tensor and scale values to [0, 1]
    transforms.Normalize(           # normalize towards a standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406], 
        std=[0.229, 0.224, 0.225])  # mean/std are the commonly used ImageNet statistics
])
 
total_data = datasets.ImageFolder(str(data_dir), transform=data_transform)  # data_dir from above
total_data 

Dataset ImageFolder
    Number of datapoints: 1125
    Root location: /Users/shushu/Documents/nlp_data/weather_photos
    StandardTransform
Transform: Compose(
               Resize(size=[224, 224], interpolation=PIL.Image.BILINEAR)
               ToTensor()
               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
           )
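
ImageFolder assigns integer labels to the sub-folders in alphabetical order. The mapping can be read off the dataset object (a quick check, not in the original post):

total_data.class_to_idx
# {'cloudy': 0, 'rain': 1, 'shine': 2, 'sunrise': 3}

total_data.classes
# ['cloudy', 'rain', 'shine', 'sunrise']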

Split the dataset
train_size = int(0.8 * len(total_data))    # 80% for training (900 images)
test_size  = len(total_data) - train_size  # 20% for testing (225 images)
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size]) 
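
Note that random_split draws a fresh random permutation each run, so the train/test membership changes between runs. If you want a reproducible split, you can pass a seeded generator; a minimal sketch (optional, not in the original post):

# Reproducible 80/20 split with a fixed seed
g = torch.Generator().manual_seed(42)
train_dataset, test_dataset = torch.utils.data.random_split(
    total_data, [train_size, test_size], generator=g)

print(len(train_dataset), len(test_dataset))  # 900 225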

Load the data
# Set up the DataLoaders

batch_size = 32
train_dl = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=1)
# Check the data shapes

for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
Shape of X [N, C, H, W]:  torch.Size([32, 3, 224, 224])
Shape of y:  torch.Size([32]) torch.int64
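
To visually confirm the preprocessing, you can undo the normalization on the batch grabbed above and plot a few images with their labels. This is an optional sanity check, not part of the original post; show_batch is a hypothetical helper name.

import numpy as np

def show_batch(images, labels, classes, n=8):
    # undo transforms.Normalize so the images display with natural colors
    mean = np.array([0.485, 0.456, 0.406])
    std  = np.array([0.229, 0.224, 0.225])
    plt.figure(figsize=(12, 3))
    for i in range(n):
        img = images[i].permute(1, 2, 0).numpy() * std + mean  # CHW -> HWC, de-normalize
        plt.subplot(1, n, i + 1)
        plt.imshow(np.clip(img, 0, 1))
        plt.title(classes[labels[i].item()])
        plt.axis('off')
    plt.show()

show_batch(X, y, total_data.classes)  # X, y come from the loop above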

Build the Network

from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential, ReLU

num_classes = 4

class Model(nn.Module):
    
    def __init__(self):
        super(Model, self).__init__()
        
        # Convolutional feature extractor followed by the classifier head
        self.layers = Sequential(
            # block 1
            nn.Conv2d(3, 24, kernel_size=5),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            
            # block 2
            nn.Conv2d(24, 64, kernel_size=5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, kernel_size=5),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 24, kernel_size=5),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            # a 224x224 input shrinks to a 24 x 50 x 50 feature map, hence 24*50*50 = 60000 inputs
            nn.Linear(24*50*50, 516, bias=True),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(516, 215, bias=True),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(215, num_classes, bias=True),
        )
        
    def forward(self, x):
        x = self.layers(x)
        return x    


device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = Model().to(device)
model

Model(
  (layers): Sequential(
    (0): Conv2d(3, 24, kernel_size=(5, 5), stride=(1, 1))
    (1): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): Conv2d(24, 64, kernel_size=(5, 5), stride=(1, 1))
    (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): ReLU()
    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (7): Conv2d(64, 128, kernel_size=(5, 5), stride=(1, 1))
    (8): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (9): ReLU()
    (10): Conv2d(128, 24, kernel_size=(5, 5), stride=(1, 1))
    (11): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (12): ReLU()
    (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (14): Flatten(start_dim=1, end_dim=-1)
    (15): Linear(in_features=60000, out_features=516, bias=True)
    (16): ReLU()
    (17): Dropout(p=0.5, inplace=False)
    (18): Linear(in_features=516, out_features=215, bias=True)
    (19): ReLU()
    (20): Dropout(p=0.5, inplace=False)
    (21): Linear(in_features=215, out_features=4, bias=True)
  )
)
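
The in_features=60000 of the first Linear layer is determined by the shape of the last feature map: each 5x5 convolution without padding shrinks the spatial size by 4, and each 2x2 max-pool halves it, so 224 -> 220 -> 216 -> 108 -> 104 -> 100 -> 50, and the flattened size is 24 * 50 * 50 = 60000. A small helper (added here for illustration, not in the original post) traces this:

def flatten_size(input_size=224):
    # follow the spatial size through the conv/pool stack of Model
    size = input_size
    size -= 4            # Conv2d(3, 24, kernel_size=5), no padding
    size -= 4            # Conv2d(24, 64, kernel_size=5)
    size //= 2           # MaxPool2d(2, 2)
    size -= 4            # Conv2d(64, 128, kernel_size=5)
    size -= 4            # Conv2d(128, 24, kernel_size=5)
    size //= 2           # MaxPool2d(2, 2)
    return 24 * size * size  # 24 output channels

print(flatten_size())  # 60000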

Train the Model

 
loss_fn    = nn.CrossEntropyLoss() # loss function
learn_rate = 1e-3 # learning rate
opt        = torch.optim.SGD(model.parameters(), lr=learn_rate)
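
The original post sticks with vanilla SGD at lr=1e-3. If the loss decreases slowly within the few epochs used below, SGD with momentum or Adam are common drop-in alternatives (optional substitutions, not the author's choice; the learning rates are only rough starting points):

# Optional alternatives (not used below):
# opt = torch.optim.SGD(model.parameters(), lr=learn_rate, momentum=0.9)
# opt = torch.optim.Adam(model.parameters(), lr=1e-4)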

Training function
def train(dataloader, model, loss_fn, optimizer):
    
    size = len(dataloader.dataset)  # size of the training set (900 images here)
    num_batches = len(dataloader)   # number of batches (ceil(900/32) = 29)
 
    train_loss, train_acc = 0, 0  # running training loss and number of correct predictions
    
    for X, y in dataloader:  # fetch a batch of images and labels
        X, y = X.to(device), y.to(device)
        
        # compute the prediction error
        pred = model(X)          # network output
        loss = loss_fn(pred, y)  # difference between network output and the ground-truth labels
        
        # backpropagation
        optimizer.zero_grad()  # reset gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update parameters
        
        # accumulate accuracy and loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
        
        print('-- ', train_acc, train_loss)  # per-batch progress (debug output)
            
    train_acc  /= size
    train_loss /= num_batches
 
    return train_acc, train_loss

Test function
def test(dataloader, model, loss_fn):
    
    size        = len(dataloader.dataset)  # size of the test set (225 images here)
    num_batches = len(dataloader)          # number of batches (ceil(225/32) = 8)
    test_loss, test_acc = 0, 0
    
    # disable gradient tracking during evaluation to save memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            
            # compute the loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred, target)
            
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()
 
    test_acc  /= size
    test_loss /= num_batches
 
    return test_acc, test_loss

Run training
epochs     = 5
train_loss = []
train_acc  = []
test_loss  = []
test_acc   = []

for epoch in range(epochs):
    print('\n-- ', epoch) 
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)
    
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    template = ('\n-- Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}')
    
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))
print('Done')
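
Once training finishes, it is common to persist the learned weights so the model can be reused without retraining. A minimal sketch using state_dict (the file name 'weather_cnn.pth' is arbitrary and not from the original post):

# Save only the learned parameters
torch.save(model.state_dict(), 'weather_cnn.pth')

# Later: rebuild the same architecture and load the weights back
model_restored = Model().to(device)
model_restored.load_state_dict(torch.load('weather_cnn.pth', map_location=device))
model_restored.eval()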
 

Model Evaluation

# 1. Loss and accuracy curves
import matplotlib.pyplot as plt

# suppress warnings
import warnings

warnings.filterwarnings("ignore")               # ignore warning messages
plt.rcParams['font.sans-serif']    = ['SimHei'] # display Chinese labels correctly (SimHei font)
plt.rcParams['axes.unicode_minus'] = False      # display minus signs correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()


# 2. Predict on a single image

img_path = "/Users/xx/.../weather_photos/rain/rain80.jpg"
classes = ['cloudy', 'rain', 'shine', 'sunrise']


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    img = Image.open(img_path)
    plt.imshow(img)
    # [N, C, H, W]
    img = data_transform(img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)
    model.eval()
    with torch.no_grad():
        # predict class
        output = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_cla = torch.argmax(predict).numpy()
        print(classes[predict_cla])
    plt.show()
     
main()
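
To see the model's confidence for every class instead of only the top prediction, you can print the full softmax distribution. A small extension (predict_probs is a hypothetical helper, not part of the original post):

def predict_probs(path, model, device):
    # print the softmax probability of each class for a single image
    img = data_transform(Image.open(path).convert('RGB')).unsqueeze(0).to(device)
    model.eval()
    with torch.no_grad():
        probs = torch.softmax(model(img).squeeze(0), dim=0)
    for cls, p in zip(classes, probs.tolist()):
        print(f"{cls:8s}: {p:.3f}")

predict_probs(img_path, model, device)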

2023-03-21 (Tuesday)
