[AI Project Practice] Day 2: CNN-Based CIFAR-10 Image Classification, FashionMNIST Image Classification, and Face-Smile Data Processing

CIFAR-10 Dataset: Image Classification Task

Dataset download: the dataset is downloaded automatically by the code below.

Data preparation

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import torch.nn.functional as F
# Define normalization parameters
transform = transforms.Compose([transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # first tuple: per-channel (RGB) mean
# second tuple: per-channel (RGB) std
train_set = torchvision.datasets.CIFAR10(root='./data', 
                                         train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, 
                                           batch_size=16, shuffle=True, num_workers=0)

# Test data
test_set = torchvision.datasets.CIFAR10(root='./data', 
                                         train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, 
                                           batch_size=16, shuffle=False, num_workers=0)
                                           
dataiter = iter(train_loader)
images, labels = next(dataiter)  # dataiter.next() was removed in recent PyTorch versions

def show_image(img):
    # input: tensor of shape [c, h, w]
    img = img / 2 + 0.5                        # undo the (0.5, 0.5) normalization so values are back in [0, 1]
    img_np = img.numpy()                       # convert to numpy
    img_np = np.transpose(img_np, (1, 2, 0))   # [h, w, c]
    plt.imshow(img_np)

show_image(torchvision.utils.make_grid(images))

(Figure: grid of sample CIFAR-10 training images produced by make_grid)


Model construction

Model

class CNN(nn.Module):
    def __init__(self):
        # input shape: 3 x 32 x 32
        super(CNN, self).__init__()
        # convolutional layers
        self.conv1 = nn.Conv2d(3, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # fully connected layers; flattened size is 16*28*28 = 12544
        self.fc1 = nn.Linear(16*28*28, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)   # 3x32x32 -> 6x30x30
        x = F.relu(x)

        x = self.conv2(x)   # 6x30x30 -> 16x28x28
        x = F.relu(x)

        # flatten for the fully connected layers
        x = x.view(-1, 16*28*28)
        x = self.fc1(x)
        x = F.relu(x)

        x = self.fc2(x)
        x = F.relu(x)

        x = self.fc3(x)
        return x

cnn = CNN()
cnn

CNN(
(conv1): Conv2d(3, 6, kernel_size=(3, 3), stride=(1, 1))
(conv2): Conv2d(6, 16, kernel_size=(3, 3), stride=(1, 1))
(fc1): Linear(in_features=12544, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=64, bias=True)
(fc3): Linear(in_features=64, out_features=10, bias=True)
)
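
The in_features of fc1 (16*28*28 = 12544) follows from applying two 3x3 convolutions without padding: 32 -> 30 -> 28. A quick sanity check with a dummy input (the variable names below are only for illustration):

# Sanity check of the flattened size: 3x32x32 -> conv1 -> 6x30x30 -> conv2 -> 16x28x28
dummy = torch.randn(1, 3, 32, 32)
feat = F.relu(cnn.conv2(F.relu(cnn.conv1(dummy))))
print(feat.shape)                  # torch.Size([1, 16, 28, 28])
print(feat.view(1, -1).shape[1])   # 12544 = 16*28*28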

Loss + optimizer

import torch.optim as optim
criterion = nn.CrossEntropyLoss()  # loss function
optimizer = optim.Adam(cnn.parameters(), lr=0.001)

Training and testing

from tqdm import tqdm
import os

os.makedirs("model", exist_ok=True)  # make sure the save directory exists

train_loss_hist = []
test_loss_hist = []
# 2 epochs for a quick run; 20 are recommended
for epoch in tqdm(range(2)):
    cnn.train()          # training mode
    running_loss = 0.0   # accumulated training loss over the last 250 batches
    for i, data in enumerate(train_loader):
        images, labels = data
        outputs = cnn(images)
        loss = criterion(outputs, labels)  # mean cross-entropy loss over the batch

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # loss of one batch

        if i % 250 == 0:
            # every 250 batches (250 x 16 images) record the losses and evaluate
            # (note: the record at i == 0 averages only a single batch)
            cnn.eval()   # evaluation mode
            test_loss_sum = 0.0
            with torch.no_grad():
                for test_data in test_loader:
                    test_images, test_labels = test_data
                    test_outputs = cnn(test_images)
                    test_loss_sum += criterion(test_outputs, test_labels).item()
            test_loss = test_loss_sum / len(test_loader)   # average test loss over the whole test set
            train_loss_hist.append(running_loss / 250)     # average training loss over the last 250 batches
            test_loss_hist.append(test_loss)
            running_loss = 0.0
            cnn.train()  # switch back to training mode
            # torch.save(cnn, "model/cnn_image_model_epoch_{}_step{}.pkl".format(epoch, i))
            print("epoch:{} step:{} train Loss:{} test Loss:{}".format(epoch + 1, i, loss.item(), test_loss))
    # save the model once per epoch
    torch.save(cnn, "model/cnn_image_model_epoch_{}_train_Loss_{}_test_Loss_{}.pkl".format(epoch, loss.item(), test_loss))
    print("epoch:{} train Loss:{} test Loss:{}".format(epoch + 1, loss.item(), test_loss))

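The loss histories recorded every 250 batches can be plotted to check convergence; a minimal sketch using the matplotlib import from the data-preparation step:

# Minimal sketch: plot the loss curves recorded every 250 batches by the loop above
plt.plot(train_loss_hist, label="train loss")
plt.plot(test_loss_hist, label="test loss")
plt.xlabel("recording step (every 250 batches)")
plt.ylabel("loss")
plt.legend()
plt.show()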

fer2013 facial expression classification dataset

Key point: build a utility class for loading the dataset.

The utility class should return the data and labels directly in numpy format, as sketched below.
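
A minimal sketch of such a utility class, assuming fer2013 is stored as the usual CSV with an "emotion" label column and a space-separated "pixels" column of 48x48 grayscale values; the class name, file path, and column names here are assumptions, not code from the original post:

# Minimal sketch (assumption: ./data/fer2013.csv with "emotion" and "pixels" columns, 48x48 grayscale)
class Fer2013Loader:
    def __init__(self, csv_path="./data/fer2013.csv"):
        self.csv_path = csv_path

    def load(self):
        df = pd.read_csv(self.csv_path)
        # each row of "pixels" is a space-separated string of 48*48 grayscale values
        data = np.array([np.array(p.split(), dtype=np.uint8).reshape(48, 48)
                         for p in df["pixels"]])
        labels = df["emotion"].to_numpy()
        return data, labels   # both as numpy arrays

Usage would look like data, labels = Fer2013Loader().load(), after which the arrays can be wrapped in a PyTorch Dataset if needed.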


FashionMNIST dataset: clothing image classification

Covered in Mu Li's course (Dive into Deep Learning).
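
Loading it mirrors the CIFAR-10 code above, swapping in torchvision.datasets.FashionMNIST; since the images are single-channel 28x28, Normalize takes one mean/std value. A minimal sketch (the fashion_* variable names are just for illustration):

# Minimal sketch: FashionMNIST is 1-channel 28x28, so Normalize takes a single mean/std
fashion_transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.5,), (0.5,))])
fashion_train = torchvision.datasets.FashionMNIST(root='./data', train=True,
                                                  download=True, transform=fashion_transform)
fashion_test = torchvision.datasets.FashionMNIST(root='./data', train=False,
                                                 download=True, transform=fashion_transform)
fashion_train_loader = torch.utils.data.DataLoader(fashion_train, batch_size=16, shuffle=True)
fashion_test_loader = torch.utils.data.DataLoader(fashion_test, batch_size=16, shuffle=False)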
