1 小序
1.0 环境
- Ubuntu18.04
- Tensorflow(cpu)1.12.0
- python3.x
- OpenCV4.0.0
1.1 项目目录
lpr_tensorflow
├── font
├── genplate.py
├── images
├── input_data.py
├── logs
├── model
├── network_model.py
├── NoPlates
├── plate_generate.py
├── __pycache__
└── train_model.py
其中 font、images、NoPlates 三个目录可从项目 https://github.com/szad670401/end-to-end-for-chinese-plate-recognition 获取;genplate.py 文件使用下面给出的版本即可。
1 生成车牌数据
1.0 生成图像函数
【Demo:genplate.py】
#coding=utf-8
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import cv2
import numpy as np
import os
from math import *
from six.moves import xrange
# The 65 plate-character classes, in label order:
# indices 0-30 are the 31 province abbreviations, 31-40 the digits 0-9,
# and 41-64 the 24 letters (I and O are excluded, as on real plates).
chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
         "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
         "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
         "Y", "Z"
         ]
# Reverse lookup: character -> class index (exactly the enumeration of chars).
index = {ch: i for i, ch in enumerate(chars)}
def AddSmudginess(img, Smu):
    """Blend a random 50x50 patch of the smudge template Smu into img.

    Both img and the patch are taken to 50x50; the blend is done in
    inverted space so dark smudge pixels darken the plate.
    """
    row0 = r(Smu.shape[0] - 50)
    col0 = r(Smu.shape[1] - 50)
    patch = cv2.resize(Smu[row0:row0 + 50, col0:col0 + 50], (50, 50))
    inverted = cv2.bitwise_not(cv2.resize(img, (50, 50)))
    merged = cv2.bitwise_and(patch, inverted)
    return cv2.bitwise_not(merged)
def rot(img, angel, shape, max_angel):
    """Shear-like perspective warp simulating a rotated plate.

    angel: current shear angle in degrees (sign picks the direction);
    max_angel: the largest angle, used only to size the output canvas.
    """
    w, h = shape[1], shape[0]
    size = (w + int(h * cos((float(max_angel) / 180) * 3.14)), h)
    # Horizontal offset of the top/bottom edge produced by the shear.
    interval = abs(int(sin((float(angel) / 180) * 3.14) * h))
    src = np.float32([[0, 0], [0, h], [w, 0], [w, h]])
    if angel > 0:
        dst_pts = np.float32([[interval, 0], [0, size[1]], [size[0], 0], [size[0] - interval, h]])
    else:
        dst_pts = np.float32([[0, 0], [interval, size[1]], [size[0] - interval, 0], [size[0], h]])
    M = cv2.getPerspectiveTransform(src, dst_pts)
    return cv2.warpPerspective(img, M, size)
def rotRandrom(img, factor, size):
    """Random perspective jitter: each corner moves by up to `factor` px.

    NOTE(review): callers pass size as (width, height) but the corner
    coordinates use size[0] as the vertical extent — this asymmetry is
    kept exactly as in the original; verify against the caller if reused.
    """
    sh = size
    src = np.float32([[0, 0], [0, sh[0]], [sh[1], 0], [sh[1], sh[0]]])
    dst_pts = np.float32([
        [r(factor), r(factor)],
        [r(factor), sh[0] - r(factor)],
        [sh[1] - r(factor), r(factor)],
        [sh[1] - r(factor), sh[0] - r(factor)],
    ])
    M = cv2.getPerspectiveTransform(src, dst_pts)
    return cv2.warpPerspective(img, M, size)
def tfactor(img):
    """Randomly scale hue/saturation/value to vary lighting and colour."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # (base, spread) per HSV channel; one random draw each, in channel order.
    for ch, (base, spread) in enumerate([(0.8, 0.2), (0.3, 0.7), (0.2, 0.8)]):
        hsv[:, :, ch] = hsv[:, :, ch] * (base + np.random.random() * spread)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def random_envirment(img, data_set):
    """Composite the plate over a random background image from data_set.

    Pixels of img that are exactly 0 are treated as transparent and
    filled from the background.
    """
    bg_path = data_set[r(len(data_set))]
    env = cv2.resize(cv2.imread(bg_path), (img.shape[1], img.shape[0]))
    mask = (img == 0).astype(np.uint8) * 255
    background = cv2.bitwise_and(mask, env)
    return cv2.bitwise_or(background, img)
def GenCh(f, val):
    """Render one Chinese character with font f; return a 70x23 RGB array.

    Drawn on a wide 45x70 canvas first, then squeezed to 23px wide so
    the glyph fills one character cell of the plate.
    """
    canvas = Image.new("RGB", (45, 70), (255, 255, 255))
    ImageDraw.Draw(canvas).text((0, 3), val, (0, 0, 0), font=f)
    return np.array(canvas.resize((23, 70)))
def GenCh1(f, val):
    """Render one Latin/digit plate character with font f as a 70x23 RGB array."""
    canvas = Image.new("RGB", (23, 70), (255, 255, 255))
    ImageDraw.Draw(canvas).text((0, 2), val, (0, 0, 0), font=f)
    return np.array(canvas)
def AddGauss(img, level):
    """Box-blur img with a square kernel of side 2*level + 1."""
    k = level * 2 + 1
    return cv2.blur(img, (k, k))
def r(val):
    """Uniform random integer in [0, val) for positive val (0 when val is 0)."""
    return int(val * np.random.random())
def AddNoiseSingleChannel(single):
    """Add random noise to one uint8 channel.

    Gaussian noise is rescaled to [0, 255 - max(single)] so that, for the
    brightest pixel, the sum stays within uint8 range (other pixels may
    still wrap under uint8 addition — original behaviour kept).
    """
    headroom = 255 - single.max()
    raw = np.random.normal(0, 1 + r(6), single.shape)
    unit = (raw - raw.min()) / (raw.max() - raw.min())
    scaled = (headroom * unit).astype(np.uint8)
    return single + scaled
def addNoise(img, sdev=0.5, avg=10):
    """Apply AddNoiseSingleChannel to each of the 3 channels in place.

    sdev and avg are unused; kept only for caller compatibility.
    """
    for ch in range(3):
        img[:, :, ch] = AddNoiseSingleChannel(img[:, :, ch])
    return img
class GenPlate:
    """Synthesises Chinese licence-plate training images.

    fontCh / fontEng: paths to the Chinese and Latin TrueType fonts.
    NoPlates: directory of plate-free background crops used as environments.
    """
    def __init__(self, fontCh, fontEng, NoPlates):
        self.fontC = ImageFont.truetype(fontCh, 43, 0)
        self.fontE = ImageFont.truetype(fontEng, 60, 0)
        # White 226x70 canvas onto which the 7 characters are drawn.
        self.img = np.array(Image.new("RGB", (226, 70), (255, 255, 255)))
        self.bg = cv2.resize(cv2.imread("./images/template.bmp"), (226, 70))
        self.smu = cv2.imread("./images/smu2.jpg")
        # Collect every background file path under NoPlates.
        self.noplates_path = []
        for parent, parent_folder, filenames in os.walk(NoPlates):
            for filename in filenames:
                path = parent + "/" + filename
                self.noplates_path.append(path)

    def draw(self, val):
        """Draw the 7 characters of `val` into fixed cells of self.img and return it."""
        offset = 2
        # Province character (Chinese), then one letter, then five more cells.
        self.img[0:70, offset + 8:offset + 8 + 23] = GenCh(self.fontC, val[0])
        self.img[0:70, offset + 8 + 23 + 6:offset + 8 + 23 + 6 + 23] = GenCh1(self.fontE, val[1])
        for i in range(5):
            base = offset + 8 + 23 + 6 + 23 + 17 + i * 23 + i * 6
            self.img[0:70, base:base + 23] = GenCh1(self.fontE, val[i + 2])
        return self.img

    def generate(self, text):
        """Render and augment one plate image for a 7-character string.

        Returns the augmented BGR image, or None when len(text) != 7
        (original behaviour kept).
        """
        if len(text) == 7:
            fg = self.draw(text)
            fg = cv2.bitwise_not(fg)
            com = cv2.bitwise_or(fg, self.bg)
            # Augmentation pipeline: shear, perspective jitter, colour,
            # random background, blur, per-channel noise.
            com = rot(com, r(60) - 30, com.shape, 30)
            com = rotRandrom(com, 10, (com.shape[1], com.shape[0]))
            com = tfactor(com)
            com = random_envirment(com, self.noplates_path)
            com = AddGauss(com, 1 + r(4))
            com = addNoise(com)
            return com

    def genPlateString(self, pos, val):
        """Random 7-character plate string; if pos != -1, force `val` at that index."""
        plateStr = ""
        box = [0, 0, 0, 0, 0, 0, 0]
        if pos != -1:
            box[pos] = 1
        for unit, cpos in zip(box, xrange(len(box))):
            if unit == 1:
                plateStr += val
            else:
                if cpos == 0:
                    plateStr += chars[r(31)]        # province (indices 0-30)
                elif cpos == 1:
                    plateStr += chars[41 + r(24)]   # letter (indices 41-64)
                else:
                    plateStr += chars[31 + r(34)]   # digit or letter (31-64)
        return plateStr

    def genBatch(self, batchSize, pos, charRange, outputPath, size):
        """Generate batchSize plates, write them to outputPath, return (strings, RGB images).

        Bug fix: the original called methods on a module-level global `G`
        instead of `self`, so genBatch only worked when a global instance
        named G happened to exist; it now uses self.
        """
        if not os.path.exists(outputPath):
            os.mkdir(outputPath)
        l_plateStr = []
        l_plateImg = []
        for i in range(batchSize):
            plateStr = self.genPlateString(-1, -1)
            img = self.generate(plateStr)
            img = cv2.resize(img, size)
            filename = os.path.join(outputPath, str(i).zfill(4) + '.' + plateStr + ".jpg")
            cv2.imwrite(filename, img)
            l_plateStr.append(plateStr)
            l_plateImg.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        return l_plateStr, l_plateImg
1.1 生成训练数据
【Demo:input_data.py】
import numpy as np
import cv2
from genplate import *
#产生用于训练的数据
class OCRIter():
    """Produces batches of synthetic plate images with their labels."""

    def __init__(self, batch_size, height, width):
        super(OCRIter, self).__init__()
        self.genplate = GenPlate("./font/platech.ttf", './font/platechar.ttf', './NoPlates')
        self.batch_size = batch_size
        self.height = height
        self.width = width

    def iter(self):
        """Return (images, labels): two lists, each of length batch_size."""
        images = []
        labels = []
        for _ in range(self.batch_size):
            lab, img = gen_sample(self.genplate, self.width, self.height)
            images.append(img)
            labels.append(lab)
        return images, labels
def rand_range(lo, hi):
    """Random integer in [lo, hi) (via r, which draws from [0, hi - lo))."""
    span = hi - lo
    return lo + r(span)
def gen_rand():
    """Draw a random 7-character plate.

    Returns (name, label): the plate string and its 7 class indices —
    a province (0-30), a letter (41-64), then five digit-or-letter
    classes (31-64). Random draws happen in that same order.
    """
    label = [rand_range(0, 31), rand_range(41, 65)]
    for _ in range(5):
        label.append(rand_range(31, 65))
    name = "".join(chars[idx] for idx in label)
    return name, label
def gen_sample(genplate, width, height):
    """Generate one training sample.

    Returns (label, img): the 7 class indices and a (height, width, 3)
    image scaled to [0, 1].
    """
    plate_str, label = gen_rand()
    rendered = genplate.generate(plate_str)
    rendered = cv2.resize(rendered, (width, height))
    scaled = np.multiply(rendered, 1 / 255.0)
    return label, scaled
2 搭建神经网络
【Demo:network_model.py】
import tensorflow as tf
def init_weights_biases(name_w, name_b, shape):
    """Create a (weights, biases) variable pair.

    Weights: truncated normal, stddev 0.1, of the given shape.
    Biases: constant 0.1, one per output channel (shape[-1]).
    """
    w_init = tf.truncated_normal_initializer(stddev=0.1)
    b_init = tf.constant_initializer(0.1)
    weights = tf.get_variable(name=name_w, shape=shape, dtype=tf.float32, initializer=w_init)
    biases = tf.get_variable(name=name_b, shape=[shape[-1]], dtype=tf.float32, initializer=b_init)
    return weights, biases
def conv2d(input_tensor, ksize, strides, pad, name_w, name_b):
    """Convolution + bias + ReLU.

    ksize: [kh, kw, in_channels, out_channels]; pad: "SAME" or "VALID".
    Variables are created with tf.get_variable under names name_w/name_b.
    """
    w_init = tf.truncated_normal_initializer(stddev=0.1)
    weights = tf.get_variable(name=name_w, shape=ksize, dtype=tf.float32, initializer=w_init)
    biases = tf.get_variable(name=name_b, shape=[ksize[-1]], dtype=tf.float32,
                             initializer=tf.constant_initializer(0.1))
    linear = tf.nn.conv2d(input_tensor, weights, strides=strides, padding=pad)
    return tf.nn.relu(linear + biases)
def max_pooling(input_tensor, ksize, strides, pad):
    """Thin wrapper around tf.nn.max_pool with explicit ksize/strides/padding."""
    return tf.nn.max_pool(input_tensor, ksize=ksize, strides=strides, padding=pad)
def fullc(input_tensor, wsize, name_w, name_b):
    """Fully-connected (affine) layer: x @ W + b, no activation.

    wsize: [in_features, out_features].
    """
    w_init = tf.truncated_normal_initializer(stddev=0.1)
    weights = tf.get_variable(name=name_w, shape=wsize, dtype=tf.float32, initializer=w_init)
    biases = tf.get_variable(name=name_b, shape=[wsize[-1]], dtype=tf.float32,
                             initializer=tf.constant_initializer(0.1))
    return tf.matmul(input_tensor, weights) + biases
def small_basic_block(input_tensor, ksize, strides, pad):
    """Bottleneck block: squeeze to a quarter of the channels, apply 3x1
    and 1x3 convolutions, then expand back to ksize[2] channels.

    Bug fix: the original used ksize[2]/4, which is a float in Python 3
    and is rejected as a variable shape; integer division (//) is required.
    NOTE(review): variable names "a".."h" are hard-coded, so this block can
    only be instantiated once per variable scope.
    """
    quarter = ksize[2] // 4
    conv_s1 = conv2d(input_tensor, [ksize[0], ksize[1], ksize[2], quarter], strides, pad, "a", "b")
    conv_s2 = conv2d(conv_s1, [3, 1, quarter, quarter], strides, pad, "c", "d")
    conv_s3 = conv2d(conv_s2, [1, 3, quarter, quarter], strides, pad, "e", "f")
    conv_s4 = conv2d(conv_s3, [ksize[0], ksize[1], quarter, ksize[2]], strides, pad, "g", "h")
    return conv_s4
def inference(inputs, keep_prob):
    """Forward pass: 6 conv layers with 3 max-pools, then 7 classifier heads.

    inputs: image batch placeholder; keep_prob: dropout keep probability.
    Returns 7 logits tensors (one per plate-character position, 65 classes
    each). Improvements over the original: the seven copy-pasted head
    blocks (which each recomputed the same flattened size) are collapsed
    into one loop that produces identical scope and variable names
    (fullc_21..fullc_27, fw2_1/fb2_1..fw2_7/fb2_7); the stale
    "[batch_size, 36, 136, 64]" shape strings (wrong for the 72x272 input
    actually used) are removed.
    """
    with tf.name_scope("conv_1"):
        conv_1 = conv2d(inputs, [3, 3, 3, 32], [1, 1, 1, 1], "VALID", "cw_1", "cb_1")
    with tf.name_scope("conv_2"):
        conv_2 = conv2d(conv_1, [3, 3, 32, 32], [1, 1, 1, 1], "VALID", "cw_2", "cb_2")
    with tf.name_scope("max_pool_1"):
        pooling_1 = max_pooling(conv_2, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
    with tf.name_scope("conv_3"):
        conv_3 = conv2d(pooling_1, [3, 3, 32, 64], [1, 1, 1, 1], "VALID", "cw_3", "cb_3")
    with tf.name_scope("conv_4"):
        conv_4 = conv2d(conv_3, [3, 3, 64, 64], [1, 1, 1, 1], "VALID", "cw_4", "cb_4")
    with tf.name_scope("max_pool_2"):
        pooling_2 = max_pooling(conv_4, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
    with tf.name_scope("conv_5"):
        conv_5 = conv2d(pooling_2, [3, 3, 64, 128], [1, 1, 1, 1], "VALID", "cw_5", "cb_5")
    with tf.name_scope("conv_6"):
        conv_6 = conv2d(conv_5, [3, 3, 128, 128], [1, 1, 1, 1], "VALID", "cw_6", "cb_6")
    with tf.name_scope("max_pool_3"):
        pooling_3 = max_pooling(conv_6, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
    with tf.name_scope("fullc_1"):
        # Flatten the feature map and apply dropout; this is the shared
        # input to all 7 classification heads.
        output_shape = pooling_3.get_shape()
        flatten = output_shape[1].value * output_shape[2].value * output_shape[3].value
        reshape_output = tf.reshape(pooling_3, [-1, flatten])
        fc_1 = tf.nn.dropout(reshape_output, keep_prob)
    # One 65-way logits head per character position.
    heads = []
    for i in range(1, 8):
        with tf.name_scope("fullc_2{}".format(i)):
            heads.append(fullc(fc_1, [flatten, 65], "fw2_{}".format(i), "fb2_{}".format(i)))
    return tuple(heads)
def losses(logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7, labels):
    """Mean sparse-softmax cross-entropy per character position.

    labels: (batch, 7) integer class indices; column i pairs with logits_{i+1}.
    Returns the 7 scalar losses and registers a summary for each.
    The seven identical copy-pasted blocks of the original are collapsed
    into one loop producing identical scope/summary names (loss_1..loss_7).
    """
    labels = tf.convert_to_tensor(labels, tf.int32)
    all_logits = [logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7]
    loss_list = []
    for i, logits in enumerate(all_logits, start=1):
        with tf.name_scope("loss_{}".format(i)):
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels[:, i - 1])
            loss = tf.reduce_mean(cross_entropy)
            tf.summary.scalar("loss_{}".format(i), loss)
        loss_list.append(loss)
    return tuple(loss_list)
def train(loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7, learning_rate):
    """Create one Adam optimizer per character-position loss.

    Returns the 7 train ops, in the same order as the losses. The seven
    identical blocks of the original are collapsed into one loop that
    produces identical scope names (optimizer_1..optimizer_7).
    """
    all_losses = [loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7]
    train_ops = []
    for i, loss in enumerate(all_losses, start=1):
        with tf.name_scope("optimizer_{}".format(i)):
            train_ops.append(tf.train.AdamOptimizer(learning_rate).minimize(loss))
    return tuple(train_ops)
def evaluation(logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7, labels):
    """Top-1 accuracy over all 7 character positions combined.

    The per-position logits are stacked along the batch axis into
    (7*batch, 65); the (batch, 7) label matrix is transposed and
    flattened so each row lines up with its logits. Registers an
    "accuracy" scalar summary.
    """
    stacked_logits = tf.concat(
        [logits_1, logits_2, logits_3, logits_4, logits_5, logits_6, logits_7], 0)
    label_tensor = tf.convert_to_tensor(labels, tf.int32)
    flat_labels = tf.reshape(tf.transpose(label_tensor), [-1])
    with tf.name_scope("accuracy"):
        hits = tf.cast(tf.nn.in_top_k(stacked_logits, flat_labels, 1), tf.float16)
        accuracy = tf.reduce_mean(hits)
        tf.summary.scalar("accuracy", accuracy)
    return accuracy
3 训练神经网络
【Demo:train_model.py】
import tensorflow as tf
import network_model
from input_data import OCRIter
import numpy as np
import os
# Training hyper-parameters and input placeholders.
batch_size = 8          # images per training step
image_h = 72            # input image height in pixels
image_w = 272           # input image width in pixels
learning_rate = 0.0001  # Adam learning rate
count = 30000           # total number of training steps
num_label = 7           # characters per plate
channels = 3            # RGB
LOG_DIR = "./logs"      # TensorBoard summary directory
if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)
with tf.name_scope("source_data"):
    '''output data:[batch_size, 36, 136, 3]
    [batch_size, height, width, channels]
    '''
    # NOTE(review): the string above is stale — the placeholder defined
    # below is [batch_size, 72, 272, 3], not 36x136.
    # inputs: float images in [0, 1] (scaled by input_data.gen_sample);
    # labels: 7 integer class indices per plate.
    inputs = tf.placeholder(tf.float32, [batch_size, image_h, image_w, channels], name="inputs")
    labels = tf.placeholder(tf.int32, [batch_size, num_label], name="labels")
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
def get_batch():
    """Build one training batch; returns (images, labels) as numpy arrays.

    Uses the module-level batch_size / image_h / image_w settings.
    """
    provider = OCRIter(batch_size, image_h, image_w)
    imgs, labs = provider.iter()
    return np.array(imgs), np.array(labs)
# Build the training graph; order matters: inference -> losses -> train ops.
# Seven logits tensors, one per plate-character position (65 classes each).
train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
train_logits_5, train_logits_6, train_logits_7 = network_model.inference(inputs, keep_prob)
# Per-position mean softmax cross-entropy losses.
train_loss_1, train_loss_2, train_loss_3, train_loss_4, \
train_loss_5, train_loss_6, train_loss_7 = network_model.losses(train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
train_logits_5, train_logits_6, train_logits_7, labels)
# One Adam train op per position, each minimising its own loss.
train_op_1, train_op_2, train_op_3, train_op_4, \
train_op_5, train_op_6, train_op_7 = network_model.train(train_loss_1, train_loss_2, train_loss_3, train_loss_4, \
train_loss_5, train_loss_6, train_loss_7, learning_rate)
# Combined top-1 accuracy across all 7 character positions.
train_acc = network_model.evaluation(train_logits_1, train_logits_2, train_logits_3, train_logits_4, \
train_logits_5, train_logits_6, train_logits_7, labels)
# Merge every scalar summary registered during graph construction.
summary_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
if __name__ == "__main__":
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        summary_writer = tf.summary.FileWriter(LOG_DIR, sess.graph)
        # Hoisted out of the loop: the checkpoint path and the directory
        # existence check are loop-invariant (the original re-ran them
        # every single step).
        ckpt_dir = "./model/lpr.ckpt"
        if not os.path.exists("./model"):
            os.makedirs("./model")
        for step in range(count):
            # Fresh synthetic batch each step; keep_prob 0.5 for training.
            x_batch, y_batch = get_batch()
            feed_dict = {inputs: x_batch, labels: y_batch, keep_prob: 0.5}
            _, _, _, _, _, _, _, loss_1, loss_2, loss_3, loss_4, \
            loss_5, loss_6, loss_7, acc, summary = sess.run(
                [train_op_1, train_op_2, train_op_3, train_op_4, train_op_5,
                 train_op_6, train_op_7, train_loss_1, train_loss_2,
                 train_loss_3, train_loss_4, train_loss_5, train_loss_6,
                 train_loss_7, train_acc, summary_op],
                feed_dict=feed_dict)
            loss_all = loss_1 + loss_2 + loss_3 + loss_4 + \
                loss_5 + loss_6 + loss_7
            if step % 10 == 0:
                print("loss1:{}, loss2:{}, loss3:{}, loss4:{}, loss5:{}, loss6:{}, loss7: {}".format(loss_1, loss_2, loss_3, loss_4, loss_5, loss_6, loss_7))
                print("Total loss: {}, accuracy: {}".format(loss_all, acc))
            if step % 1000 == 0 or (step + 1) == count:
                # Periodic checkpoint and summary flush.
                saver.save(sess, save_path=ckpt_dir, global_step=step)
                summary_writer.add_summary(summary, step)
        coord.request_stop()
        coord.join(threads)
        summary_writer.close()
4 总结
(1) 车牌字符串长度:同一个 7 字符车牌,len() 在 Python2(UTF-8 字节串,汉字占 3 字节)下为 9,在 Python3(Unicode 字符串)下为 7;
(2) 代码解析后续更新;
【参考文献】
[1]https://blog.csdn.net/ssmixi/article/details/78220039
[2]https://blog.csdn.net/ssmixi/article/details/78223907
[3]https://cloud.tencent.com/developer/article/1005199
[4]https://ypwhs.github.io/captcha/