使用Lenet5卷积神经网络,简单识别车牌

前言

最近课上用到卷积神经网络搭建车牌识别,所以花费了很多精力进行这个学习。LeNet-5是1998年提出的神经网络,相对简单,没有dropout层,所以使用这个新手搭起来比较简单。

主要Python文件有两个:NeuralNetworkConstruction.py 和 Detected.py。

NeuralNetworkConstruction.py主要负责数据的预处理,神经网络的搭建。lenet5网络有两个卷积层,两个池化层,一个拉直层和3个全连接层。

开始训练

训练时,使用的是百度AI平台上的训练数据,data文件夹已分类好各种字符,像素为20×20的灰度图像。NeuralNetworkConstruction.py中,读取图片集并写出训练名单的代码,生成了所需的训练集名单和测试集名单。每隔10张,训练集一张图片放入测试集。
需要使用时,将图片数据生成npy,若没有,则使用generateds函数生成。

神经网络的搭建,我是用class搭建法。第一层卷积层为6个5×5的卷积核,激活函数sigmoid。池化层第一层步长为2。第二层卷积层为16个5×5的卷积核,激活函数sigmoid。池化层第二层步长为2。然后进入拉直层,再经历3个全连接层。如果大家的训练图片大小或者类型不同,可深入理解lenet每一层的参数进行修改。由于我车牌识别共用到65个字符的识别,所以最后一层使用了softmax输出。

训练完模型后,将模型断点保存下来,下一次可以从这加载。最后可以针对模型的准确度和loss进行输出

import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
from PIL import Image
import cv2

np.set_printoptions(threshold=np.inf)  # print arrays in full (needed when dumping all weights to weights.txt)


# ========================================== Read the image set and write train/test lists.
# Each line of the list files is "<image_path>@<label>"; every 10th image of a
# class goes to the test list, the rest to the train list.
data_path = './characterData/data'
character_folders = os.listdir(data_path)
label = 0
LABEL_temp = {}
# Start from fresh list files: remove stale ones if they exist.
if(os.path.exists('train_data.list')):
    os.remove('train_data.list')
if(os.path.exists('test_data.list')):
    os.remove('test_data.list')
# Open both list files once instead of re-opening them on every folder iteration.
with open('train_data.list', 'a') as f_train, open('test_data.list', 'a') as f_test:
    for character_folder in character_folders:
        # Skip editor/checkpoint artifacts and the known-bad folder; note the
        # label counter is NOT incremented for skipped folders (same as before).
        if character_folder == '.DS_Store' or character_folder == '.ipynb_checkpoints' or character_folder == 'data23617':
            continue
        print(character_folder + " " + str(label))          # label auto-increments, matching folder order
        LABEL_temp[str(label)] = character_folder           # remember label -> folder-name mapping
        folder_path = os.path.join(data_path, character_folder)
        character_imgs = os.listdir(folder_path)
        for i in range(len(character_imgs)):
            line = os.path.join(folder_path, character_imgs[i]) + "@" + str(label) + '\n'
            if i % 10 == 0:                                 # every 10th image goes to the test set
                f_test.write(line)
            else:
                f_train.write(line)
        label = label + 1
print('图像列表已生成')


#============================================= Load the self-made dataset: paths & cache locations

train_path = ''                # list files already contain complete paths, so the prefix is empty
train_txt = './train_data.list'     # list file mapping training image paths to labels
x_train_savepath = './mnist_image_label/mnist_x_train.npy'   # cached training images (flattened rows)
y_train_savepath = './mnist_image_label/mnist_y_train.npy'   # cached training labels

test_path = ''
test_txt = './test_data.list'       # list file for the test split
x_test_savepath = './mnist_image_label/mnist_x_test.npy'     # cached test images (flattened rows)
y_test_savepath = './mnist_image_label/mnist_y_test.npy'     # cached test labels


def generateds(path, txt):
    """Read image paths and labels from a list file and return (x, y_).

    Each line of *txt* has the form "<image_path>@<label>".  Images are
    loaded, converted to RGB, resized to 32x32 and normalized to [0, 1].

    Args:
        path: prefix prepended to every image path read from the list file.
        txt:  path of the list file.

    Returns:
        x:  np.ndarray of shape (N, 32, 32, 3), floats in [0, 1].
        y_: np.ndarray of shape (N,), dtype int64.
    """
    with open(txt, 'r') as f:          # context manager guarantees the file is closed
        contents = f.readlines()
    x, y_ = [], []
    for content in contents:
        value = content.split("@")     # split on '@': value[0] = image path, value[1] = label
        img_path = path + value[0]
        img = Image.open(img_path)
        img = np.array(img.convert('RGB'))   # force 3-channel RGB input
        img = cv2.resize(img, (32, 32))      # match the 32x32 input size the network expects
        img = img / 255.                     # normalize pixel values to [0, 1]
        x.append(img)
        y_.append(value[1])                  # the astype() below tolerates the trailing '\n'
        print('loading : ' + content)        # progress output

    x = np.array(x)
    y_ = np.array(y_)
    y_ = y_.astype(np.int64)                 # labels as 64-bit integers
    return x, y_


# Load the cached .npy dataset if all four files exist; otherwise generate it.
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('-------------Load Datasets-----------------')
    x_train_save = np.load(x_train_savepath)
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    # BUG FIX: generateds() produces 32x32 RGB images (32*32*3 values per
    # sample), so the flattened rows must be restored to (32, 32, 3).
    # Reshaping to (32, 32) raised a ValueError and also dropped the channel
    # axis that the Conv2D input layer requires.
    x_train = np.reshape(x_train_save, (len(x_train_save), 32, 32, 3))
    x_test = np.reshape(x_test_save, (len(x_test_save), 32, 32, 3))
else:
    print('-------------Generate Datasets-----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)

    print('-------------Save Datasets-----------------')
    # Flatten each sample into one row so np.save stores a 2-D array.
    x_train_save = np.reshape(x_train, (len(x_train), -1))
    x_test_save = np.reshape(x_test, (len(x_test), -1))
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)


#===============================================神经网络搭建部分
class LeNet5(Model):
    """LeNet-5 classifier: two conv/pool stages and a three-layer dense head.

    The final Dense layer has 65 softmax units, one per plate-character
    class.  Attribute names (c1, p1, ...) are part of the object-based
    checkpoint layout and must not be renamed.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # Stage 1: 6 filters of 5x5 with sigmoid, then 2x2 max-pooling.
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), activation='sigmoid')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)
        # Stage 2: 16 filters of 5x5 with sigmoid, then 2x2 max-pooling.
        self.c2 = Conv2D(filters=16, kernel_size=(5, 5), activation='sigmoid')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        # Head: flatten, then fully connected layers of 120, 84 and 65 units.
        self.flatten = Flatten()
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        self.f3 = Dense(65, activation='softmax')

    def call(self, x):
        """Forward pass: apply every layer in sequence and return softmax output."""
        for layer in (self.c1, self.p1, self.c2, self.p2,
                      self.flatten, self.f1, self.f2, self.f3):
            x = layer(x)
        return x


model = LeNet5()

# Integer class ids + softmax output => sparse CE with from_logits=False.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# Resume training from the last checkpoint if one exists.
checkpoint_save_path = "./checkpoint/LeNet5.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# Save weights (only) each time the monitored validation metric improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)

print("x_train.shape", x_train.shape)
history = model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
model.summary()

# Dump all trainable variables for inspection.  A context manager ensures the
# handle is closed even if str(v.numpy()) raises; the handle is also renamed
# since 'file' shadowed a (Python 2) builtin.
with open('./weights.txt', 'w') as weights_file:
    for v in model.trainable_variables:
        weights_file.write(str(v.name) + '\n')
        weights_file.write(str(v.shape) + '\n')
        weights_file.write(str(v.numpy()) + '\n')

###############################################    show   ###############################################

# Plot training/validation accuracy and loss curves side by side.
history_data = history.history
curve_specs = [
    ('Training and Validation Accuracy',
     [('sparse_categorical_accuracy', 'Training Accuracy'),
      ('val_sparse_categorical_accuracy', 'Validation Accuracy')]),
    ('Training and Validation Loss',
     [('loss', 'Training Loss'),
      ('val_loss', 'Validation Loss')]),
]
for position, (title, series) in enumerate(curve_specs, start=1):
    plt.subplot(1, 2, position)
    for key, label_text in series:
        plt.plot(history_data[key], label=label_text)
    plt.title(title)
    plt.legend()
plt.show()

开始识别

使用模型进行识别的代码Detected.py。读取了所需识别的车牌照片,灰度处理后,进行二值化处理。然后对图片进行切割成每个字符。将每个字符保存到DivideFromPhoto文件夹里面。

接下来复现模型,加载参数。由于我这个网络原型是用于处理cifar数据集的识别。而cifar是像素为32×32,RGB图像,所以我为了尽快使用,将图片变成和cifar一样。使用labels数组,对识别出的数据保存。建立match数组,使特征和字符对应起来,最后输出识别结果。

Detected.py:

import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
import cv2
import paddle as paddle

 # ================================================对车牌图片进行处理,分割出车牌中的每一个字符并保存
license_plate = cv2.imread('./palate.png')                                      #加载需要识别的车牌图片
gray_plate = cv2.cvtColor(license_plate, cv2.COLOR_RGB2GRAY)
ret, binary_plate = cv2.threshold(gray_plate, 175, 255, cv2.THRESH_BINARY)
result = []
for col in range(binary_plate.shape[1]):
    result.append(0)
    for row in range(binary_plate.shape[0]):
        result[col] = result[col] + binary_plate[row][col] / 255
character_dict = {}
num = 0
i = 0
while i < len(result):
    if result[i] == 0:
        i += 1
    else:
        index = i + 1
        while result[index] != 0:
            index += 1
        character_dict[num] = [i, index - 1]
        num += 1
        i = index

for i in range(8):
    if i == 2:
        continue
    padding = (170 - (character_dict[i][1] - character_dict[i][0])) / 2
    ndarray = np.pad(binary_plate[:, character_dict[i][0]:character_dict[i][1]], ((0, 0), (int(padding), int(padding))),
                     'constant', constant_values=(0, 0))
    ndarray = cv2.resize(ndarray, (20, 20))
    cv2.imwrite('./' +'/DivideFromPhoto/'+ str(i) + '.png', ndarray)                #将分割的字符写入DivideFromPhoto文件夹中


#============================================================================== Load the model
# NOTE(review): hard-coded absolute Windows path — consider the relative
# './checkpoint/LeNet5.ckpt' used by the training script; verify on deployment.
model_save_path = 'C:/Users/US/Downloads/Pai Shibie/checkpoint/LeNet5.ckpt'
class LeNet5(Model):
    """LeNet-5 network, identical to the training-time definition.

    The layer types, sizes and attribute names must match the class in
    NeuralNetworkConstruction.py exactly so the checkpoint weights map
    onto the same variables when load_weights() runs.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), activation='sigmoid')    # conv stage 1
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.c2 = Conv2D(filters=16, kernel_size=(5, 5), activation='sigmoid')   # conv stage 2
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.flatten = Flatten()                                                 # to 1-D features
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        self.f3 = Dense(65, activation='softmax')                                # 65 char classes

    def call(self, x):
        """Forward pass through both conv stages and the dense head."""
        features = self.p1(self.c1(x))
        features = self.p2(self.c2(features))
        features = self.flatten(features)
        features = self.f2(self.f1(features))
        return self.f3(features)

# Rebuild the network and restore the trained weights from the checkpoint.
model = LeNet5()
model.load_weights(model_save_path)

#=============================================================== Recognition
# Run each saved character crop through the network and collect its label id.
# BUG FIX: os.listdir() returns entries in arbitrary order, which could
# scramble the plate characters; sort so '0.png' .. '7.png' are processed
# left to right.
character_folders = sorted(os.listdir('./DivideFromPhoto'))
preNum = len(character_folders)

labels = []
for i in range(preNum):
    image_path = character_folders[i]
    img = Image.open('./DivideFromPhoto/' + image_path)     # read one split character
    # Resize to the 32x32 input size the network was trained on.
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is
    # the same resampling filter under its current name.
    img = img.resize((32, 32), Image.LANCZOS)
    img_arr = np.array(img.convert('RGB'))                  # 3-channel input, same as training
    img_arr = img_arr / 255.0                               # normalize to [0, 1]
    x_predict = img_arr[tf.newaxis, ...]                    # add the batch dimension
    result = model.predict(x_predict)                       # class probabilities, shape (1, 65)
    pred = tf.argmax(result, axis=1)                        # most likely class per sample
    res = pred[0]                                           # the single prediction
    result = res.numpy()                                    # tensor -> plain integer
    labels.append(result)
    print("识别结果标签为:", res)
    print("labels:", labels)


# Label-id -> plate-character lookup table (digits, letters and Chinese
# province abbreviations), matching the 65 training classes in folder order.
# BUG FIX: entry 52 was 'W ' with a trailing space, which corrupted the
# printed plate string.
match = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'A', 11: 'B', 12: 'C',
            13: '川', 14: 'D', 15: 'E', 16: '鄂',
            17: 'F', 18: 'G', 19: '赣', 20: '甘', 21: '贵', 22: '桂', 23: 'H', 24: '黑', 25: '沪', 26: 'J', 27: '冀', 28: '津',
            29: '京', 30: '吉', 31: 'K', 32: 'L', 33: '辽',
            34: '鲁', 35: 'M', 36: '蒙', 37: '闽', 38: 'N', 39: '宁', 40: 'P', 41: 'Q', 42: '青', 43: '琼', 44: 'R', 45: 'S',
            46: '陕', 47: '苏', 48: '晋', 49: 'T', 50: 'U',
            51: 'V', 52: 'W', 53: '皖', 54: 'X', 55: '湘', 56: '新', 57: 'Y', 58: '豫', 59: '渝', 60: '粤', 61: '云', 62: 'Z',
            63: '藏', 64: '浙'
            }

# Map every predicted label id to its plate character and print the plate.
print('\n车牌识别结果为:', end='')
print(''.join(match[label_id] for label_id in labels), end='')

展开阅读全文

没有更多推荐了,返回首页

©️2019 CSDN 皮肤主题: 数字20 设计师: CSDN官方博客
应支付0元
点击重新获取
扫码支付

支付成功即可阅读