Week P10: License Plate Recognition with PyTorch

I. Import the data

from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.models as models
import torch.nn.functional as F
import torch.nn as nn
import torch, torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

device(type='cpu')

1. Get the class names

import os,PIL,random,pathlib
import matplotlib.pyplot as plt
# Enable Chinese character display in matplotlib
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display minus signs correctly

data_dir = './licence_plate/'
data_dir = pathlib.Path(data_dir)

data_paths  = list(data_dir.glob('*'))
classeNames = [str(path).split("/")[1].split("_")[1].split(".")[0] for path in data_paths]

print(classeNames)

['沪G1CE81', '云G86LR6', '鄂U71R9F', '津G467JR', '京Y948BY', '蒙LN06N1', '津G18HA9', '蒙JC6834', '鄂C173LW', '云RD5S31', '贵A9BS54', '云N30T0F', '渝F500QN', 'G75EW9', '陕L6F4Y7', '苏KHK488', '闽G86AN5', '桂P7C1G4', '豫LN26V3', '冀T0G43L', '京T77UG0', '浙GEB103', '藏A10GB5', '藏A4L23L', '蒙D650DK', '新ZT953E', '吉MD37S5', '沪LH136X', '辽Z9C5T4', '贵NY7W90', '渝H3V0S4', '蒙A05N5Y']

data_paths = list(data_dir.glob('*'))
data_paths_str = [str(path) for path in data_paths]
data_paths_str

['licence_plate/000008250_沪G1CE81.jpg',
 'licence_plate/000015082_云G86LR6.jpg',
 'licence_plate/000004721_鄂U71R9F.jpg',
 'licence_plate/000000682_津G467JR.jpg',
 'licence_plate/000006515_鄂Q8S08J.jpg',
 …]
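Note that the class-name parsing above splits on "/", so it assumes POSIX-style paths. A minimal, separator-agnostic alternative (my own sketch, assuming the `<id>_<plate>.jpg` naming shown above) parses the file name itself via pathlib:

## Hedged alternative: take the plate label from the file name's stem,
## so the code does not depend on the path separator.
classeNames = [path.stem.split("_")[1] for path in data_paths]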

2. Visualize the data

plt.figure(figsize=(14, 5))
plt.suptitle("数据示例", fontsize=15)

for i in range(18):
    plt.subplot(3,6,i+1)
    # plt.xticks([])
    # plt.yticks([])
    # plt.grid(False)
    
    # display the image
    images = plt.imread(data_paths_str[i])
    plt.imshow(images)
    
plt.show()

[Figure: grid of 18 sample license-plate images]

3. Digitize the labels

import numpy as np

char_enum = ["京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁",\
              "豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","军","使"]

number = [str(i) for i in range(0, 10)]  ## digits 0-9
alphabet = [chr(i) for i in range(65, 91)]  ## letters A-Z
char_set = char_enum + number + alphabet
char_set_len = len(char_set)
label_name_len = len(classeNames[0])  ## 7 characters per plate

## Convert a label string into a one-hot matrix
def text2vec(text):
    vector = np.zeros([label_name_len, char_set_len])
    for i, c in enumerate(text):
        idx = char_set.index(c)
        vector[i][idx] = 1.0
    return vector

all_labels = [text2vec(i) for i in classeNames]
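Decoding in the other direction is handy for inspecting predictions later. A minimal sketch (a helper I am adding, not part of the original code) that maps a one-hot or score matrix back to a plate string:

## Hypothetical helper: decode a [label_name_len, char_set_len] matrix back to text.
def vec2text(vector):
    char_idx = vector.argmax(axis=1)                    ## most likely character per position
    return "".join(char_set[int(i)] for i in char_idx)

print(vec2text(all_labels[0]))                          ## should reproduce classeNames[0]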

4. Load the data files

import os 
import pandas as pd 
from torchvision.io import read_image
from torch.utils.data import Dataset
import torch.utils.data as data
from PIL import Image

class MyDataset(data.Dataset):
    def __init__(self, all_labels, data_paths_str, transform):
        self.img_labels = all_labels     ## label matrices (one-hot encodings)
        self.img_dir = data_paths_str    ## list of image file paths
        self.transform = transform       ## transform applied to each image
        
    def __len__(self):
        return len(self.img_labels)
    
    def __getitem__(self, index):
        image = Image.open(self.img_dir[index]).convert('RGB')
        label = self.img_labels[index]     ## label corresponding to this image
        
        if self.transform:
            image = self.transform(image)
            
        return image, label     ## return the image and its label

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),          ## resize input images to a uniform size
    transforms.ToTensor(),                  ## convert PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(                   ## standardize --> roughly standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])          ## mean and std computed from a random sample of the dataset
])

total_data = MyDataset(all_labels, data_paths_str, train_transforms)
total_data

<__main__.MyDataset at 0x14dd21fc0>
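A quick sanity check (my own addition) before building the DataLoaders, to confirm that one sample has the expected shapes:

## Sanity check: one (image, label) pair from the custom Dataset.
img, lbl = total_data[0]
print(img.shape, lbl.shape)    ## expected: torch.Size([3, 224, 224]) (7, 69)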

5. Split the data

train_size = int(0.8 * len(total_data))
test_size = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_size, test_size

(10940, 2735)

train_loader = torch.utils.data.DataLoader(train_dataset, 
                                           batch_size=16, 
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(test_dataset, 
                                          batch_size=16, 
                                          shuffle=True)


print("The number of images in a training set is:", len(train_loader)*16)
print("The number of images in a test set is:", len(test_loader)*16)
print("The number of batches per epoch is: ", len(train_loader))

The number of images in a training set is: 10944
The number of images in a test set is: 2736
The number of batches per epoch is: 684

for X, y in test_loader:
    print("Shape of X[N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break

Shape of X[N, C, H, W]: torch.Size([16, 3, 224, 224])
Shape of y: torch.Size([16, 7, 69]) torch.float64
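Note that y comes out as torch.float64 because np.zeros defaults to float64, while the model outputs float32. Training ran as written here, but if the dtype mismatch ever causes problems with the loss, one hedged tweak (my assumption, not the original code) is to emit float32 labels from text2vec and rebuild all_labels:

## Optional tweak: build float32 label matrices so they match the model's float32 outputs.
def text2vec(text):
    vector = np.zeros([label_name_len, char_set_len], dtype=np.float32)
    for i, c in enumerate(text):
        vector[i][char_set.index(c)] = 1.0
    return vector

all_labels = [text2vec(i) for i in classeNames]    ## rebuild the labels with the new dtype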

II. Build the model

class Network_bn(nn.Module):
    def __init__(self):
        super(Network_bn, self).__init__()
        
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2,2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn5 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24*50*50, label_name_len*char_set_len)
        self.reshape = Reshape([label_name_len, char_set_len])
        
    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool(x)
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        x = self.pool(x)
        x = x.view(-1, 24*50*50)
        x = self.fc1(x)
        
        ## final reshape to [batch, label_name_len, char_set_len]
        x = self.reshape(x)
        
        return x

## Define the Reshape layer
class Reshape(nn.Module):
    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.shape = shape
        
    def forward(self, x):
        return x.view(x.size(0), *self.shape)
    
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device". format(device))

model = Network_bn().to(device)
model        

Using cpu device

Network_bn(
(conv1): Conv2d(3, 12, kernel_size=(5, 5), stride=(1, 1))
(bn1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(12, 12, kernel_size=(5, 5), stride=(1, 1))
(bn2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv4): Conv2d(12, 24, kernel_size=(5, 5), stride=(1, 1))
(bn4): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv5): Conv2d(24, 24, kernel_size=(5, 5), stride=(1, 1))
(bn5): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(fc1): Linear(in_features=60000, out_features=483, bias=True)
(reshape): Reshape()
)

import torchsummary

## 显示网络结构
torchsummary.summary(model,(3, 224, 224))

Output:

----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 12, 220, 220]             912
       BatchNorm2d-2         [-1, 12, 220, 220]              24
            Conv2d-3         [-1, 12, 216, 216]           3,612
       BatchNorm2d-4         [-1, 12, 216, 216]              24
         MaxPool2d-5         [-1, 12, 108, 108]               0
            Conv2d-6         [-1, 24, 104, 104]           7,224
       BatchNorm2d-7         [-1, 24, 104, 104]              48
            Conv2d-8         [-1, 24, 100, 100]          14,424
       BatchNorm2d-9         [-1, 24, 100, 100]              48
        MaxPool2d-10           [-1, 24, 50, 50]               0
           Linear-11                  [-1, 483]      28,980,483
          Reshape-12                [-1, 7, 69]               0
================================================================
Total params: 29,006,799
Trainable params: 29,006,799
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 26.56
Params size (MB): 110.65
Estimated Total Size (MB): 137.79
----------------------------------------------------------------
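The in_features of fc1 (24*50*50 = 60000) follows directly from the shape arithmetic for a 224x224 input: each 5x5 convolution without padding shrinks each side by 4, and each max-pool halves it. A small check:

## Trace the spatial size through the network for a 224x224 input.
size = 224
size -= 4          ## conv1: 220
size -= 4          ## conv2: 216
size //= 2         ## pool : 108
size -= 4          ## conv4: 104
size -= 4          ## conv5: 100
size //= 2         ## pool : 50
print(size, 24 * size * size)   ## 50 60000 -> matches fc1's in_features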

III. Train the model

1. Optimizer and loss function

optimizer = torch.optim.Adam(model.parameters(), 
                             lr=1e-4, 
                             weight_decay=0.0001)

loss_model = nn.CrossEntropyLoss()

def test(model, test_loader, loss_model):
    size = len(test_loader.dataset)
    num_batches = len(test_loader)
    
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            
            ## accumulate the loss
            test_loss += loss_model(pred, y).item()
            
            ## if y is one-hot encoded, convert it to class indices
            if y.dim() == 3 and y.size(2) == 69:
                y = y.argmax(dim = 2)    ## y: [batch_size, 7, 69] -> [batch_size, 7]
                    
            pred_labels = pred.argmax(dim = 2)
            correct += (pred_labels == y).type(torch.float).sum().item()
            
    
        test_loss /= num_batches
        
        total_elements = size * 7    ## 7 characters per plate
        correct /= total_elements
        
        print(f"Avg loss: {test_loss:>8f} \n ")
        print(f"Test accuracy: {correct * 100:.1f}%")
        return correct, test_loss
    
    
def train(model, train_loader, loss_model, optimizer):
    model=model.to(device)
    model.train()
    
    for i, (images, labels) in enumerate(train_loader, 0):  ## 0 is the starting index
        
        images = images.to(device)    ## move images and labels to the target device
        labels = labels.to(device)
        
        optimizer.zero_grad()
        outputs = model(images)
        
        loss = loss_model(outputs, labels)
        loss.backward()
        optimizer.step()
        
        if i % 1000 == 0:
            print('[%5d] loss: %.3f' % (i, loss))
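One detail worth noting: nn.CrossEntropyLoss expects the class dimension at dim 1, while pred and y here are both [batch, 7, 69] with the 69 characters on the last dimension. The code above trains as written, but if you want the loss to treat the 69 characters explicitly as the classes, a hedged variant (my own adjustment, not the original setup) is to permute the prediction and compare against integer class indices:

## Hedged variant of the loss: put the 69-way character dimension where
## CrossEntropyLoss expects the classes (dim 1), with integer targets.
def char_ce_loss(pred, y):
    target = y.argmax(dim=2)                       ## [batch, 7, 69] one-hot -> [batch, 7] indices
    return F.cross_entropy(pred.permute(0, 2, 1),  ## [batch, 7, 69] -> [batch, 69, 7]
                           target)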

2. Train the model

print(y.shape)    ## torch.Size([16, 7, 69])
test_acc_list = []
test_loss_list = []
epochs = 30

for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------")
    train(model, train_loader, loss_model, optimizer)
    test_acc, test_loss = test(model, test_loader, loss_model)
    test_acc_list.append(test_acc)
    test_loss_list.append(test_loss)
    
print("Done")    

Output:

Epoch 1
-------------------------
[    0] loss: 0.042
Avg loss: 0.051550 
 
Test accuracy: 43.1%
Epoch 2
-------------------------
[    0] loss: 0.024
Avg loss: 0.041734 
 
Test accuracy: 49.3%
Epoch 3
-------------------------
[    0] loss: 0.022
Avg loss: 0.039898 
 
Test accuracy: 50.8%
Epoch 4
-------------------------
[    0] loss: 0.029
Avg loss: 0.036895 
 
Test accuracy: 54.0%
Epoch 5
-------------------------
[    0] loss: 0.034
Avg loss: 0.033945 
 
Test accuracy: 56.3%
Epoch 6
-------------------------
[    0] loss: 0.028
Avg loss: 0.032999 
 
Test accuracy: 56.5%
Epoch 7
-------------------------
[    0] loss: 0.037
Avg loss: 0.032969 
 
Test accuracy: 58.6%
Epoch 8
-------------------------
[    0] loss: 0.037
Avg loss: 0.032173 
 
Test accuracy: 58.7%
Epoch 9
-------------------------
[    0] loss: 0.022
Avg loss: 0.030094 
 
Test accuracy: 60.4%
Epoch 10
-------------------------
[    0] loss: 0.019
Avg loss: 0.031460 
 
Test accuracy: 58.7%
Epoch 11
-------------------------
[    0] loss: 0.025
Avg loss: 0.030107 
 
Test accuracy: 60.3%
Epoch 12
-------------------------
[    0] loss: 0.006
Avg loss: 0.029454 
 
Test accuracy: 61.3%
Epoch 13
-------------------------
[    0] loss: 0.018
Avg loss: 0.028027 
 
Test accuracy: 63.4%
Epoch 14
-------------------------
[    0] loss: 0.014
Avg loss: 0.027596 
 
Test accuracy: 63.0%
Epoch 15
-------------------------
[    0] loss: 0.026
Avg loss: 0.027740 
 
Test accuracy: 63.7%
Epoch 16
-------------------------
[    0] loss: 0.025
Avg loss: 0.027099 
 
Test accuracy: 63.8%
Epoch 17
-------------------------
[    0] loss: 0.020
Avg loss: 0.027430 
 
Test accuracy: 63.6%
Epoch 18
-------------------------
[    0] loss: 0.021
Avg loss: 0.027482 
 
Test accuracy: 63.3%
Epoch 19
-------------------------
[    0] loss: 0.017
Avg loss: 0.026714 
 
Test accuracy: 64.0%
Epoch 20
-------------------------
[    0] loss: 0.008
Avg loss: 0.026565 
 
Test accuracy: 63.3%
Epoch 21
-------------------------
[    0] loss: 0.016
Avg loss: 0.026697 
 
Test accuracy: 64.0%
Epoch 22
-------------------------
[    0] loss: 0.016
Avg loss: 0.025678 
 
Test accuracy: 65.1%
Epoch 23
-------------------------
[    0] loss: 0.027
Avg loss: 0.026265 
 
Test accuracy: 64.8%
Epoch 24
-------------------------
[    0] loss: 0.013
Avg loss: 0.025437 
 
Test accuracy: 64.5%
Epoch 25
-------------------------
[    0] loss: 0.007
Avg loss: 0.026368 
 
Test accuracy: 64.1%
Epoch 26
-------------------------
[    0] loss: 0.009
Avg loss: 0.027329 
 
Test accuracy: 63.4%
Epoch 27
-------------------------
[    0] loss: 0.023
Avg loss: 0.025453 
 
Test accuracy: 63.8%
Epoch 28
-------------------------
[    0] loss: 0.026
Avg loss: 0.026028 
 
Test accuracy: 64.7%
Epoch 29
-------------------------
[    0] loss: 0.021
Avg loss: 0.025452 
 
Test accuracy: 65.4%
Epoch 30
-------------------------
[    0] loss: 0.018
Avg loss: 0.024886 
 
Test accuracy: 65.2%
Done

IV. Analyze the results

import numpy as np
import matplotlib.pyplot as plt

x = [i for i in range(1,31)]

plt.plot(x, test_loss_list, label="Loss", alpha=0.8)

plt.xlabel("Epoch")
plt.ylabel("Loss")

plt.legend()
plt.show()

[Figure: test loss per epoch]
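The training loop also collected test_acc_list, so the accuracy curve can be drawn the same way (a small addition mirroring the loss plot above):

## Plot the per-character test accuracy recorded after each epoch.
plt.plot(x, [acc * 100 for acc in test_acc_list], label="Accuracy (%)", alpha=0.8)
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.legend()
plt.show()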

V. Summary

This week I implemented a license plate recognition task.

  1. For label digitization I learned to use one-hot encoding, which maps discrete features into Euclidean space so that distances between feature values become more meaningful.
  2. I learned how to build a custom Dataset (MyDataset).
  3. When computing test-set accuracy, both pred and y have shape [batch, 7, 69]. Since pred.argmax(dim=2) has shape [batch, 7], y must first be converted back to class indices of the same shape before the element-wise comparison.