系列文章目录
这是最近参加比赛要做的车牌识别系统,只需对车牌字符进行识别,不需要对车牌进行定位。
用opencv的截图功能,将每一个字符分割下来,然后放入神经网络进行特征提取。
大致内容
1.制作数据集
2.CNN网络搭建
3.车牌图片处理
4.中文省份预测
5.英文字符和数字预测
一、工具
使用tensorflow和opencv的基础函数
二、使用步骤
1.引入库
#所涉及的库
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
import tensorflow as tf
from PIL import Image
import numpy as np
import os
import cv2
import time
2.制作数据集
# Training is done in two passes: one model for the province character, one
# for the 26 letters and 10 digits. This table maps class index (1-32) to the
# corresponding province abbreviation character.
province = {idx + 1: ch for idx, ch in enumerate(
    "藏川滇鄂甘赣贵桂黑沪吉冀津晋京辽鲁蒙闽宁青琼陕苏台皖湘新渝豫粤浙")}
# Class-index table for the letter/digit model: indices 0-9 are the digits
# themselves, indices 10-35 are the uppercase letters A-Z.
apla = {d: str(d) for d in range(10)}
apla.update({10 + k: letter
             for k, letter in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ")})
# Eight file paths. train_path / test_path hold the training / test images,
# and the two txt files map each image filename to its integer label
# (one "filename label" pair per line).
# x_train_savepath / y_train_savepath (and the test equivalents) are .npy
# cache files written after the dataset is first generated; they do not
# exist on the first run.
train_path = r'C:\Users\28606\PycharmProjects\pythonProject\car_number_detect\number_data_picture_train/'
train_txt = r'C:\Users\28606\PycharmProjects\pythonProject\car_number_detect\number_train.txt'
x_train_savepath = 'car_number_detect/number_x_train.npy'
y_train_savepath = 'car_number_detect/number_y_train.npy'
test_path = r'C:\Users\28606\PycharmProjects\pythonProject\car_number_detect\number_data_picture_test/'
test_txt = r'C:\Users\28606\PycharmProjects\pythonProject\car_number_detect\number_test.txt'
x_test_savepath = 'car_number_detect/number_x_test.npy'
y_test_savepath = 'car_number_detect/number_y_test.npy'
###################################load data##############################
def generateds(path, txt):
    """Build the (features, labels) arrays from a label file.

    Parameters
    ----------
    path : str
        Directory prefix for the image files (must end with a separator).
    txt : str
        Label file; each line is "filename label" separated by whitespace.

    Returns
    -------
    x : np.ndarray, shape (N, 28, 28), float in [0, 1]
        Binarized, normalized grayscale images.
    y_ : np.ndarray, shape (N,), int64
        Integer class labels.
    """
    # Context manager guarantees the file is closed even on error.
    with open(txt, 'r') as f:
        contents = f.readlines()
    x, y_ = [], []
    for content in contents:
        value = content.split()  # value[0] = image filename, value[1] = label
        img_path = path + value[0]  # value[0] is relative; prepend the directory
        img = Image.open(img_path)
        # Image.LANCZOS is the modern name for the ANTIALIAS filter
        # (Image.ANTIALIAS was removed in Pillow 10).
        img = img.resize((28, 28), Image.LANCZOS)
        img = np.array(img.convert('L'))  # 8-bit grayscale array
        # Vectorized binarization (replaces the per-pixel Python loop):
        # pixels darker than 200 become 255, the rest become 0.
        img = np.where(img < 200, 255, 0)
        img = img / 255.0  # normalize to [0, 1]
        x.append(img)
        y_.append(value[1])
        print('loading : ' + content)  # progress message
    x = np.array(x)
    y_ = np.array(y_).astype(np.int64)
    return x, y_
# If all four cached .npy files exist, load the preprocessed dataset directly.
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('-------------Load Datasets-----------------')
    x_train_save = np.load(x_train_savepath)
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    # Reshape to 4-D (N, 28, 28, 1): the channel axis is required by Conv2D.
    x_train = np.reshape(x_train_save, (len(x_train_save), 28, 28, 1))
    x_test = np.reshape(x_test_save, (len(x_test_save), 28, 28, 1))
else:
    # Otherwise build the dataset from the raw images and label files.
    print('-------------Generate Datasets-----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)
    # Save the generated arrays so later runs can load them directly.
    print('-------------Save Datasets-----------------')
    x_train_save = np.reshape(x_train, (len(x_train), 28, 28, 1))
    x_test_save = np.reshape(x_test, (len(x_test), 28, 28, 1))
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)
    # BUG FIX: previously x_train / x_test were left 3-D (N, 28, 28) in this
    # branch, while the load branch produces 4-D arrays — the first (generate)
    # run would then feed wrong-rank data to the Conv2D network. Use the
    # reshaped arrays in both branches.
    x_train = x_train_save
    x_test = x_test_save
################数据打乱###################
# Shuffle the training set so sample order is random. Re-seeding with the
# same value (7) before each shuffle makes both shuffles apply the identical
# permutation, so x_train and y_train stay paired up.
np.random.seed(7)
np.random.shuffle(x_train)
np.random.seed(7)
np.random.shuffle(y_train)
tf.random.set_seed(7)  # make TensorFlow's own randomness reproducible
3.用class搭建的网络结构:3个卷积层、1层拉直层、3个全连接层
卷积层就是CBAPD(卷积、批标准化、激活、池化、Dropout)特征提取器。
######################################CNN网络结构######################################
class netmodel(Model):
def __init__(self):
super(netmodel, self).__init__()
self.c1=Conv2D(filters=32,kernel_size=(3,3),padding='same')
self.b1=BatchNormalization()
self.a1=Activation('relu')
self.p1=MaxPool2D(pool_size=(2,2),strides=2,padding='same')
self.d1=Dropout(0.2)
self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
self.b2 = BatchNormalization()
self.a2 = Activation('relu')
self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same'