Week P3: Weather Recognition

Contents

I. Code and Results

1. Preparation

2. Building a Simple CNN

3. Training the Model

4. Visualizing the Results

II. Personal Summary

1. Convolutional Layers

2. Activation Functions

3. Pooling Layers

I. Code and Results

1. Preparation

import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets

import os, PIL, pathlib, random

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

data_dir = './datasets/weather_photos/'
data_dir = pathlib.Path(data_dir)

data_paths = list(data_dir.glob('*'))
classeNames = [path.name for path in data_paths]  # class names come from the sub-folder names (portable across OSes)
print(classeNames)

import matplotlib.pyplot as plt
from PIL import Image

# Path to the image folder
image_folder = './datasets/weather_photos/cloudy/'

# Collect all image files in the folder
image_files = [f for f in os.listdir(image_folder) if f.endswith((".jpg", ".png", ".jpeg"))]

# Create the Matplotlib figure
fig, axes = plt.subplots(3, 8, figsize=(16, 6))

# Load and display the images in a grid
for ax, img_file in zip(axes.flat, image_files):
    img_path = os.path.join(image_folder, img_file)
    img = Image.open(img_path)
    ax.imshow(img)
    ax.axis('off')

# Show the figure
plt.tight_layout()
plt.show()


total_datadir = './datasets/weather_photos/'

# For more about transforms.Compose, see: https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to a uniform size
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(           # standardize each channel so the model converges more easily
        mean=[0.485, 0.456, 0.406], 
        std=[0.229, 0.224, 0.225])  # the widely used ImageNet channel statistics
])

total_data = datasets.ImageFolder(total_datadir,transform=train_transforms)

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])

batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=1)


for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
    
['cloudy', 'rain', 'shine', 'sunrise']
Shape of X [N, C, H, W]:  torch.Size([32, 3, 224, 224])
Shape of y:  torch.Size([32]) torch.int64
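
The mean/std passed to Normalize above are the standard ImageNet channel statistics. If you would rather use statistics computed from this weather dataset itself, here is a minimal sketch (my own addition, not part of the original walkthrough) that accumulates per-channel mean and std in one pass:

import torch
from torchvision import datasets, transforms

# Load the images with ToTensor only, so pixel values are in [0, 1] and unnormalized.
raw_data = datasets.ImageFolder('./datasets/weather_photos/',
                                transform=transforms.Compose([
                                    transforms.Resize([224, 224]),
                                    transforms.ToTensor()]))
loader = torch.utils.data.DataLoader(raw_data, batch_size=64)

# Accumulate per-channel sums and squared sums over every pixel, then reduce.
n_pixels       = 0
channel_sum    = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
for X, _ in loader:                              # X has shape [N, 3, 224, 224]
    n_pixels       += X.shape[0] * X.shape[2] * X.shape[3]
    channel_sum    += X.sum(dim=[0, 2, 3])
    channel_sq_sum += (X ** 2).sum(dim=[0, 2, 3])

mean = channel_sum / n_pixels
std  = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
print(mean, std)                                 # candidate values for transforms.Normalize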

 

2. Building a Simple CNN

import torch.nn.functional as F

class Network_bn(nn.Module):
    def __init__(self):
        super(Network_bn, self).__init__()
        """
        nn.Conv2d()函数:
        第一个参数(in_channels)是输入的channel数量
        第二个参数(out_channels)是输出的channel数量
        第三个参数(kernel_size)是卷积核大小
        第四个参数(stride)是步长,默认为1
        第五个参数(padding)是填充大小,默认为0
        """
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2,2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn5 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24*50*50, len(classeNames))

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))      
        x = F.relu(self.bn2(self.conv2(x)))     
        x = self.pool(x)                        
        x = F.relu(self.bn4(self.conv4(x)))     
        x = F.relu(self.bn5(self.conv5(x)))  
        x = self.pool(x)                        
        x = x.view(-1, 24*50*50)
        x = self.fc1(x)

        return x

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

model = Network_bn().to(device)
model
Using cuda device


Network_bn(
  (conv1): Conv2d(3, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(12, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv4): Conv2d(12, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn4): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv5): Conv2d(24, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn5): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (fc1): Linear(in_features=60000, out_features=4, bias=True)
)
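
Where does in_features=60000 in fc1 come from? Each 5×5 convolution with padding=0 trims 4 pixels from each spatial dimension, and each 2×2 max-pool halves it: 224 → 220 → 216 → 108 → 104 → 100 → 50, so the flattened size is 24×50×50 = 60000. A quick sanity check with a dummy batch:

# Trace the feature-map shapes through the network with a dummy input.
model.eval()                                 # use running BN statistics
with torch.no_grad():
    x = torch.randn(1, 3, 224, 224).to(device)
    x = F.relu(model.bn1(model.conv1(x)))    # -> [1, 12, 220, 220]
    x = F.relu(model.bn2(model.conv2(x)))    # -> [1, 12, 216, 216]
    x = model.pool(x)                        # -> [1, 12, 108, 108]
    x = F.relu(model.bn4(model.conv4(x)))    # -> [1, 24, 104, 104]
    x = F.relu(model.bn5(model.conv5(x)))    # -> [1, 24, 100, 100]
    x = model.pool(x)                        # -> [1, 24, 50, 50]
    print(x.shape, x.flatten(1).shape)       # [1, 24, 50, 50] and [1, 60000]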

 

3. Training the Model

loss_fn    = nn.CrossEntropyLoss() # loss function
learn_rate = 1e-4                  # learning rate
opt        = torch.optim.SGD(model.parameters(), lr=learn_rate)


# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # number of images in the training set
    num_batches = len(dataloader)   # number of batches (size / batch_size, rounded up)

    train_loss, train_acc = 0, 0  # running loss and accuracy
    
    for X, y in dataloader:  # fetch a batch of images and their labels
        X, y = X.to(device), y.to(device)
        
        # Compute the prediction error
        pred = model(X)          # network output
        loss = loss_fn(pred, y)  # loss between the prediction and the ground-truth labels
        
        # Backpropagation
        optimizer.zero_grad()  # reset the gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update the parameters
        
        # Accumulate accuracy and loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()
            
    train_acc  /= size
    train_loss /= num_batches

    return train_acc, train_loss

def test(dataloader, model, loss_fn):
    size        = len(dataloader.dataset)  # number of images in the test set
    num_batches = len(dataloader)          # number of batches (size / batch_size, rounded up)
    test_loss, test_acc = 0, 0
    
    # No gradient updates are needed during evaluation, which saves memory and compute
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            
            # Compute the loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred, target)
            
            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

epochs     = 20
train_loss = []
train_acc  = []
test_loss  = []
test_acc   = []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)
    
    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)
    
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%,Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))
print('Done')
Epoch: 1, Train_acc:63.8%, Train_loss:0.973, Test_acc:45.8%,Test_loss:1.136
Epoch: 2, Train_acc:80.6%, Train_loss:0.616, Test_acc:75.1%,Test_loss:0.754
Epoch: 3, Train_acc:85.7%, Train_loss:0.480, Test_acc:76.4%,Test_loss:0.772
Epoch: 4, Train_acc:86.2%, Train_loss:0.427, Test_acc:77.3%,Test_loss:0.623
Epoch: 5, Train_acc:87.4%, Train_loss:0.389, Test_acc:80.4%,Test_loss:0.535
Epoch: 6, Train_acc:90.3%, Train_loss:0.345, Test_acc:81.3%,Test_loss:0.766
Epoch: 7, Train_acc:89.6%, Train_loss:0.355, Test_acc:80.9%,Test_loss:0.549
Epoch: 8, Train_acc:90.1%, Train_loss:0.353, Test_acc:83.1%,Test_loss:0.713
Epoch: 9, Train_acc:91.3%, Train_loss:0.335, Test_acc:84.0%,Test_loss:0.460
Epoch:10, Train_acc:92.0%, Train_loss:0.307, Test_acc:78.7%,Test_loss:1.111
Epoch:11, Train_acc:91.9%, Train_loss:0.279, Test_acc:78.2%,Test_loss:0.532
Epoch:12, Train_acc:92.3%, Train_loss:0.290, Test_acc:78.7%,Test_loss:0.499
Epoch:13, Train_acc:93.3%, Train_loss:0.239, Test_acc:82.7%,Test_loss:0.422
Epoch:14, Train_acc:93.9%, Train_loss:0.224, Test_acc:82.2%,Test_loss:0.411
Epoch:15, Train_acc:93.8%, Train_loss:0.218, Test_acc:82.7%,Test_loss:0.462
Epoch:16, Train_acc:93.8%, Train_loss:0.223, Test_acc:84.9%,Test_loss:0.413
Epoch:17, Train_acc:94.2%, Train_loss:0.238, Test_acc:78.7%,Test_loss:0.468
Epoch:18, Train_acc:95.1%, Train_loss:0.239, Test_acc:83.1%,Test_loss:0.382
Epoch:19, Train_acc:94.9%, Train_loss:0.194, Test_acc:83.6%,Test_loss:0.395
Epoch:20, Train_acc:95.8%, Train_loss:0.185, Test_acc:83.6%,Test_loss:0.421
Done

 

4. Visualizing the Results

import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif']    = ['SimHei'] # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False      # render the minus sign correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

 

II. Personal Summary

A deeper look at convolutional layers, pooling layers, and activation functions.

1. Convolutional Layers

In a convolutional neural network (CNN), the convolutional layers form a front-to-back hierarchy, and the features learned at each level grow progressively more complex.

Typically, the earlier convolutional layers learn low-level features such as edges and textures. In these shallow layers the filters are small and the receptive field is limited, so they can only capture local information in the image.

As depth increases, later convolutional layers learn higher-level features such as shapes and object parts. In deeper layers the filters' receptive fields grow, letting them observe a wider spatial extent and capture more global, abstract features.

By stacking convolutional layers, the network gradually extracts more complex feature representations: low-level features supply image detail and local structure, while high-level features supply higher-order semantic information. The sketch below makes the growing receptive field concrete.
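
Here is a small illustration of my own (not from the original walkthrough) that applies the standard receptive-field recurrence rf += (kernel_size - 1) * jump, where jump is the product of the strides of all earlier layers, to the conv/pool stack of Network_bn above:

# Receptive-field growth through the conv/pool stack of Network_bn.
# Each entry is (layer name, kernel_size, stride).
layers = [("conv1", 5, 1), ("conv2", 5, 1), ("pool", 2, 2),
          ("conv4", 5, 1), ("conv5", 5, 1), ("pool", 2, 2)]

rf, jump = 1, 1   # a single input pixel sees itself; no striding yet
for name, k, s in layers:
    rf   += (k - 1) * jump
    jump *= s
    print(f"{name}: receptive field = {rf}x{rf}")
# conv1: 5x5, conv2: 9x9, pool: 10x10, conv4: 18x18, conv5: 26x26, pool: 28x28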

2. Activation Functions

Activation functions either excite or suppress the neurons in a network.

An activation function is applied at each neuron to introduce non-linearity: it transforms the neuron's weighted-sum input and produces an output signal.

One role is excitation. When the input exceeds a threshold, the activation function maps the neuron's output to a large value, activating the neuron and passing its information on to the next layer. This is what lets a neural network model and learn complex non-linear relationships.

The other role is suppression. When the input falls short of the threshold, the activation function maps the output to a small or near-zero value, suppressing the neuron. This helps the network filter out noise and irrelevant features, improving its robustness and generalization.

Common activation functions include ReLU (Rectified Linear Unit), Sigmoid, and Tanh. Each has its own excitation/suppression characteristics and suits different scenarios and tasks.
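
A small sketch comparing how the three activations transform the same inputs; note how ReLU zeroes out (suppresses) every negative input, while Sigmoid and Tanh squash them smoothly:

import torch

x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(torch.relu(x))     # tensor([0.0000, 0.0000, 0.0000, 0.5000, 2.0000])
print(torch.sigmoid(x))  # tensor([0.1192, 0.3775, 0.5000, 0.6225, 0.8808])
print(torch.tanh(x))     # tensor([-0.9640, -0.4621,  0.0000,  0.4621,  0.9640])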

3. Pooling Layers

Pooling layers and downsampling:

A pooling layer reduces the spatial size of the feature maps, cutting computation while keeping the dominant features. Downsampling is usually implemented with a pooling operation.

A pooling operation aggregates or samples the inputs within each pooling window (also called the pooling kernel) into a single output value. The most common variants are max pooling and average pooling.

In max pooling, the maximum value within the window is taken as the output, which preserves the strongest responses and suppresses noise. Max pooling extracts the most salient features in an image and provides a degree of translation invariance.

In average pooling, the mean of the window is taken as the output. Average pooling mainly reduces feature-map dimensions and, compared with max pooling, produces smoother and more stable outputs.

Downsampling lowers the spatial resolution of an image or feature map, reducing computational cost while retaining the main features. In a CNN it is typically realized through pooling layers such as max or average pooling, as the sketch below shows.
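
A minimal sketch contrasting the two pooling operations on the same 4×4 input with a 2×2 window and stride 2:

import torch
import torch.nn as nn

x = torch.tensor([[[[1., 2., 3., 4.],
                    [5., 6., 7., 8.],
                    [9., 8., 7., 6.],
                    [5., 4., 3., 2.]]]])   # shape [1, 1, 4, 4]

max_pool = nn.MaxPool2d(2, 2)
avg_pool = nn.AvgPool2d(2, 2)
print(max_pool(x))  # [[6., 8.], [9., 7.]]     -- keeps the strongest response per window
print(avg_pool(x))  # [[3.5, 5.5], [6.5, 4.5]] -- smooths each window to its mean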

 
