# 10_02_车牌识别_tensorflow.py — license-plate recognition with TensorFlow 1.x



import tensorflow as tf
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from genplate import GenPlate, gen_sample, chars
from utils.ops import *

def create_dir_path(path):
    """Create directory *path* (including parents) if it does not already exist.

    :param path: directory path to create
    """
    # exist_ok avoids the TOCTOU race between an existence check and creation.
    os.makedirs(path, exist_ok=True)


def read_tfrecord(filename, x_name='image', y_name='label', x_shape=(72, 272, 3), y_shape=(7,), batch_size=64,
                  shuffle_data=False, num_threads=1):
    """Read one TFRecord file and return batched (image, label) tensors.

    Uses the TF1 queue-based input pipeline (string_input_producer /
    TFRecordReader); a tf.train.start_queue_runners call is required by the
    session that consumes the returned tensors.

    :param filename: path of the TFRecord file
    :param x_name: feature key of the training input x
    :param y_name: feature key of the training target y
    :param x_shape: shape the decoded image is reshaped to
    :param y_shape: shape the decoded label is reshaped to
    :param batch_size: batch size
    :param shuffle_data: if True, use a shuffling batch queue
    :param num_threads: number of enqueue threads
    :return: (image, label) batch tensors
    """
    # Queue of input file names.
    filename_queue = tf.train.string_input_producer([filename])
    # Reader that pulls serialized examples off the queue.
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    # Both features were written as raw byte strings.
    features = tf.parse_single_example(
        serialized_example,
        features={
            x_name: tf.FixedLenFeature([], tf.string),
            y_name: tf.FixedLenFeature([], tf.string)
        }
    )

    # Decode the byte strings back into numeric tensors.
    image = tf.decode_raw(features[x_name], tf.float32)
    label = tf.decode_raw(features[y_name], tf.int32)

    # decode_raw yields flat tensors; restore the static shapes.
    image = tf.reshape(image, list(x_shape))
    label = tf.reshape(label, list(y_shape))

    # Queue capacity sized so the batching threads keep consumers fed.
    capacity = batch_size * 6 + 10
    if shuffle_data:
        image, label = tf.train.shuffle_batch([image, label], batch_size=batch_size, capacity=capacity,
                                              num_threads=num_threads, min_after_dequeue=capacity // 2)
    else:
        image, label = tf.train.batch([image, label], batch_size=batch_size, capacity=capacity,
                                      num_threads=num_threads)

    return image, label


def model_mobile_v2(inputs, num_classes, bn_train=True):
    """Build a MobileNet-V2-style backbone with seven classification heads.

    One head per license-plate character; every head predicts one of
    *num_classes* characters.  Variable-scope names ('res3_1' ... 'fc27')
    match the original hand-unrolled layout, so existing checkpoints load.

    :param inputs: input image batch — presumably [N, 72, 272, 3]; the shape
        comments below assume that (TODO confirm against the input pipeline)
    :param num_classes: number of character classes per head
    :param bn_train: batch-norm training flag passed to every block
    :return: tuple of 7 logits tensors, each [N, num_classes]
    """
    exp = 3  # expansion factor of the inverted-residual blocks
    with tf.variable_scope('Mobile_V2', initializer=tf.truncated_normal_initializer(stddev=0.1)):
        net = conv2d_block(inputs, 32, 3, 1, bn_train, name='conv1_1')  # [N, 72, 272, 32]
        # Stage 2 uses expansion 1, unlike the later stages.
        net = res_block(net, 1, 16, 1, bn_train, name='res2_1')         # [N, 72, 272, 16]

        # (out_channels, stride_of_first_block, repeat_count) per stage;
        # only the first block of a stage applies the stride.
        stages = [
            (24, 2, 2),    # res3: [N, 36, 136, 24]
            (32, 1, 3),    # res4: [N, 36, 136, 32]
            (64, 2, 4),    # res5: [N, 18, 68, 64]
            (96, 1, 3),    # res6: [N, 18, 68, 96]
            (160, 2, 3),   # res7: [N, 9, 34, 160]
            (320, 1, 1),   # res8: [N, 9, 34, 320]
        ]
        for stage_idx, (channels, first_stride, repeats) in enumerate(stages, start=3):
            for block_idx in range(1, repeats + 1):
                stride = first_stride if block_idx == 1 else 1
                net = res_block(net, exp, channels, stride, bn_train,
                                name='res%d_%d' % (stage_idx, block_idx))

        net = pwise_block(net, 1280, bn_train, name='conv9_1')  # [N, 9, 34, 1280]
        # Global average pooling collapses the spatial dims.
        net = global_avg(net)  # [N, 1, 1, 1280]

        # Seven independent 1x1-conv heads, one per plate character.
        logits = []
        for head in range(1, 8):
            with tf.variable_scope('fc2%d' % head):
                logits.append(flatten(conv_1x1(net, num_classes, bias=True, name='logits')))

        return tuple(logits)


def losses(logit1, logit2, logit3, logit4, logit5, logit6, logit7, labels):
    """Build one sparse-softmax cross-entropy loss per character head.

    Scope and op names ('loss1' ... 'loss7') match the original
    hand-unrolled version, so summaries/checkpoints are unaffected.

    :param logit1: logits of head 1, [N, num_classes]
    :param logit2: ... likewise for heads 2-7
    :param logit3:
    :param logit4:
    :param logit5:
    :param logit6:
    :param logit7:
    :param labels: integer labels, [N, 7] — column i is the target of head i+1
    :return: tuple of 7 scalar loss tensors
    """
    labels = tf.convert_to_tensor(labels, tf.int32)

    all_logits = (logit1, logit2, logit3, logit4, logit5, logit6, logit7)
    result = []
    for idx, logit in enumerate(all_logits, start=1):
        with tf.variable_scope('loss%d' % idx):
            # Head idx is trained against label column idx-1.
            ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logit, labels=labels[:, idx - 1]
            )
            result.append(tf.reduce_mean(ce, name='loss%d' % idx))
    return tuple(result)

def create_optimizer(loss1, loss2, loss3, loss4, loss5, loss6, loss7, learning_rate):
    """Build one SGD training op per head loss.

    NOTE(review): each optimizer minimizes its loss independently, so shared
    backbone variables receive seven separate gradient updates per training
    step — this mirrors the original design and is preserved as-is.
    Name scopes ('optimizer1' ... 'optimizer7') match the original layout.

    :param loss1: scalar loss of head 1
    :param loss2: ... likewise for heads 2-7
    :param loss3:
    :param loss4:
    :param loss5:
    :param loss6:
    :param loss7:
    :param learning_rate: SGD learning rate shared by all seven optimizers
    :return: tuple of 7 training ops
    """
    all_losses = (loss1, loss2, loss3, loss4, loss5, loss6, loss7)
    train_ops = []
    for idx, loss in enumerate(all_losses, start=1):
        with tf.name_scope('optimizer%d' % idx):
            opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
            train_ops.append(opt.minimize(loss))
    return tuple(train_ops)


def create_accuracy(logit1, logit2, logit3, logit4, logit5, logit6, logit7, labels):
    """Compute the mean per-character accuracy over all seven heads.

    :param logit1: logits of head 1, [N, num_classes]
    :param logit2: ... likewise for heads 2-7
    :param logit3:
    :param logit4:
    :param logit5:
    :param logit6:
    :param logit7:
    :param labels: integer labels, [N, 7] — column i belongs to head i+1
    :return: scalar accuracy tensor
    """
    # Stack every head's logits row-wise -> [7*N, num_classes].
    stacked = tf.concat([logit1, logit2, logit3, logit4, logit5, logit6, logit7], axis=0)

    # Transpose labels from [N, 7] to [7, N] and flatten so that entry
    # i*N + j lines up with head i, sample j in the stacked logits.
    label_tensor = tf.convert_to_tensor(labels, tf.int32)
    flat_labels = tf.reshape(tf.transpose(label_tensor), [-1])

    with tf.name_scope('accuracy'):
        # A prediction counts as correct when the true class is the top-1.
        hits = tf.nn.in_top_k(stacked, flat_labels, k=1)
        return tf.reduce_mean(tf.cast(hits, tf.float32))












# (removed: CSDN blog-page boilerplate — likes/favorites/payment widget text
#  accidentally pasted into the source file; it was never part of the code)