DL Exercise 6: Training a CNN to Classify the Digits 0-9

Training images

Extract image features and save them as .npy files

import multiprocessing
import os
import time
from time import ctime

import cv2
import numpy as np

image_dir = r"D:/sxl/处理图片/汉字分类/train10/"       # folder containing one sub-folder per class
save_path = r'E:/sxl_Programs/Python/CNN/npy/'       # output directory for the .npy files
data_name = 'Img10'                                  # base name of the .npy files

char_set = np.array(os.listdir(image_dir))           # list of class-folder names
np.save(save_path+'ImgShuZi10.npy', char_set)        # save the class list
char_set_n = len(char_set)                           # number of classes

read_process_n = 1    # number of feature-extraction processes
repeat_n = 4          # random shifts per image (augmentation; only used in the commented-out block below)
data_size = 1000000   # maximum number of samples per .npy file

# Reads an image even when the path contains non-ASCII characters
# (cv2.imread cannot handle such paths on Windows)
def cv_imread(file_path, flag=0):
    cv_img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)
    if flag == 0 and len(cv_img.shape) == 3:
        cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    return cv_img

# Shuffle several arrays with the same permutation
def ShuffledData(features, labels):
    '''
    @description: randomly shuffle data and labels while keeping them aligned
    @author: RenHui
    '''
    permutation = np.random.permutation(features.shape[0])
    shuffled_features = features[permutation, :]  # multi-dimensional
    shuffled_labels = labels[permutation]         # 1-dimensional
    return shuffled_features, shuffled_labels
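
# Example: if features has shape (N, 4096) and labels has shape (N,), both are
# indexed by one permutation of range(N), so each (features[i], labels[i]) pair
# stays together after shuffling.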

# Simple grid feature
# Requirements: 1. independent of the input image size; 2. the input is assumed
# to be grayscale; 3. the only parameter is the input image
# Returns: a 1 x (64*64) feature vector
def GetFeature(image):

    # normalize the image size
    image = cv2.resize(image, (64, 64))
    img_h = image.shape[0]
    img_w = image.shape[1]

    # feature vector
    feature = np.zeros(img_h*img_w, dtype=np.float32)

    for h in range(img_h):
        for w in range(img_w):
            feature[h*img_w+w] = image[h, w]  # row-major flattening

    return feature

# Code run by the writer process:
def read_image_to_queue(queue):
    print('Process to write: %s' % os.getpid())
    for j, dirname in enumerate(char_set):  # dirname is the class-folder name
        label = np.where(char_set == dirname)[0][0]   # index of the folder name, used as the label
        print('No. '+str(j), 'reading folder '+dirname+' ... time:', ctime())
        for parent, _, filenames in os.walk(os.path.join(image_dir, dirname)):
            for filename in filenames:
                if filename[-4:] != '.jpg':
                    continue
                image = cv_imread(os.path.join(parent, filename), 0)
                queue.put((image, label))

    # one sentinel per reader process so every reader can terminate
    for i in range(read_process_n):
        queue.put((None, -1))

    print('Finished reading images!')
    return True
        
# Code run by the reader (feature-extraction) processes:
def extract_feature(queue, lock, count):
    '''
    @description: take images off the queue and extract their features
    @queue: first-in-first-out queue
     lock: lock held while updating the counter, to avoid conflicts
     count: shared file counter
    '''

    print('Process %s start reading...' % os.getpid())

    features = []  # extracted feature vectors
    labels = []    # corresponding labels
    while True:
        image, label = queue.get()  # fetch an image and its label from the queue

        if len(features) >= data_size or label == -1:   # enough samples buffered (or the sentinel arrived): save to disk
            if features:  # the sentinel can arrive with an empty buffer; skip saving then

                array_features = np.array(features)  # convert to arrays
                array_labels = np.array(labels)

                array_features, array_labels = ShuffledData(array_features, array_labels)  # shuffle

                lock.acquire()   # lock while claiming the next file index

                # split into training and test sets (80/20)
                split_x = int(array_features.shape[0] * 0.8)
                train_data, test_data = np.split(array_features, [split_x], axis=0)     # split the features
                train_labels, test_labels = np.split(array_labels, [split_x], axis=0)   # split the labels

                count.value += 1    # advance the file index
                str_features_name_train = data_name+'_features_train_'+str(count.value)+'.npy'
                str_labels_name_train = data_name+'_labels_train_'+str(count.value)+'.npy'
                str_features_name_test = data_name+'_features_test_'+str(count.value)+'.npy'
                str_labels_name_test = data_name+'_labels_test_'+str(count.value)+'.npy'

                lock.release()   # release the lock

                np.save(save_path+str_features_name_train, train_data)
                np.save(save_path+str_labels_name_train, train_labels)
                np.save(save_path+str_features_name_test, test_data)
                np.save(save_path+str_labels_name_test, test_labels)
                print(os.getpid(), 'save:', str_features_name_train)
                print(os.getpid(), 'save:', str_labels_name_train)
                print(os.getpid(), 'save:', str_features_name_test)
                print(os.getpid(), 'save:', str_labels_name_test)
                features.clear()
                labels.clear()

        if label == -1:
            break

        # extract the feature vector from the grayscale image
        feature = GetFeature(image)
        features.append(feature)
        labels.append(label)

        # # optionally repeat with random shifts (data augmentation)
        # for itime in range(repeat_n):
        #     rMovedImage = randomMoveImage(image)
        #     feature = SimpleGridFeature(rMovedImage)  # simple grid feature
        #     features.append(feature)
        #     labels.append(label)

    print('Process %s is done!' % os.getpid())

if __name__ == '__main__':
    time_start = time.time()  # start timing

    # the parent process creates the queue and passes it to the children:
    image_queue = multiprocessing.Queue(maxsize=1000)  # queue
    lock = multiprocessing.Lock()                      # lock
    count = multiprocessing.Value('i', 0)              # shared counter

    # process that writes images into the queue
    write_sub_process = multiprocessing.Process(target=read_image_to_queue, args=(image_queue,))

    read_sub_processes = []                            # feature-extraction processes
    for i in range(read_process_n):
        read_sub_processes.append(
            multiprocessing.Process(target=extract_feature, args=(image_queue, lock, count))
        )

    # start the writer process:
    write_sub_process.start()

    # start the reader processes:
    for p in read_sub_processes:
        p.start()

    # wait for all processes to finish:
    write_sub_process.join()
    for p in read_sub_processes:
        p.join()

    time_end = time.time()
    time_h = (time_end - time_start) / 3600
    print('Elapsed: %.6f hours' % time_h)
    print("Feature extraction to .npy finished!")

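Before training, it is worth a quick sanity check that the saved arrays line up. A minimal sketch, assuming the save_path and data_name used above produced the first file pair:

import numpy as np

save_path = r'E:/sxl_Programs/Python/CNN/npy/'
features = np.load(save_path + 'Img10_features_train_1.npy')
labels = np.load(save_path + 'Img10_labels_train_1.npy')
print(features.shape, features.dtype)  # expected: (N, 4096) float32
print(labels.shape, labels.dtype)      # expected: (N,)
assert features.shape[0] == labels.shape[0]  # one label per feature vector
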
Training the 10-class model

#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
import tensorflow as tf
import cv2

MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "data10_model"

n_label = 10          # number of classes
batch_size = 1600     # samples per batch
learning_rate = 0.001 # initial learning rate
decay_rate = 0.96     # decay rate (not applied below; see the sketch after the script)
global_steps = 30001  # total number of iterations
decay_steps = 100     # decay interval in steps (not applied below)


# Summary helper: tf.summary.scalar records a named scalar for TensorBoard,
# tf.name_scope groups the ops (defined here but not called in this script)
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram


# initialize weights
def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)  # truncated normal distribution
    return tf.Variable(initial, name=name)


# initialize biases
def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)


# convolution layer
def conv2d(x, W):
    # x: input tensor of shape [batch, in_height, in_width, in_channels]
    # W: filter/kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
    # strides[0] = strides[3] = 1 always;
    # strides[1] is the stride along the height, strides[2] along the width
    # padding: "SAME" or "VALID"
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


# pooling layer
def max_pool_2x2(x):
    # x: input tensor of shape [batch, in_height, in_width, in_channels]
    # ksize: [1, pool_height, pool_width, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# convert numeric labels to one-hot vectors [0,0,0,...,1,0,0]
def InitImagesLabels(labels_batch):
    labels_batch_new = []
    for id in labels_batch:
        aa = np.zeros(n_label, np.float32)
        aa[int(id)] = 1
        labels_batch_new.append(aa)
    return labels_batch_new
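
# Note: this loop is plain one-hot encoding; an equivalent NumPy one-liner is
# np.eye(n_label, dtype=np.float32)[np.asarray(labels_batch, dtype=int)].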

# display images with their labels (debug helper)
def ShowImageAndLabels(batch_xs, batch_ys):
    img_h = 64
    img_w = 64
    img = np.ones((img_h, img_w), dtype=np.uint8)
    icount = 0
    for batch_image in batch_xs:  # rebuild each image from its feature vector

        for h in range(img_h):
            for w in range(img_w):
                img[h, w] = batch_image[h * img_w + w]  # restore the pixel

        sss = "%d" % batch_ys[icount]
        cv2.imshow(sss, img)
        cv2.waitKey(0)

        icount += 1

# keep_prob is the probability that a neuron's output is kept during dropout
with tf.name_scope('keep_prob'):
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# name scopes
with tf.name_scope('input'):
    # define the two placeholders
    x = tf.placeholder(tf.float32, [None, 4096], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')
    with tf.name_scope('x_image'):
        # reshape x into a 4-D tensor [batch, in_height, in_width, in_channels]
        x_image = tf.reshape(x, [-1, 64, 64, 1], name='x_image')

with tf.name_scope('Conv1'):
    # weights and biases of the first convolution layer
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([3, 3, 1, 32], name='W_conv1')  # 3x3 kernels, 32 filters over 1 input channel
    with tf.name_scope('b_conv1'):
        b_conv1 = bias_variable([32], name='b_conv1')  # one bias per filter

    # convolve x_image with the kernels and add the biases
    # (note: no activation is applied before pooling)
    with tf.name_scope('conv2d_1'):
        conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
    with tf.name_scope('h_pool1'):
        h_pool1 = max_pool_2x2(conv2d_1)  # max pooling
    with tf.name_scope('h_pool1_drop'):
        h_pool1 = tf.nn.dropout(h_pool1, keep_prob, name='h_pool1_drop')

with tf.name_scope('Conv2'):
    # weights and biases of the second convolution layer
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([3, 3, 32, 64], name='W_conv2')  # 3x3 kernels, 64 filters over 32 input channels
    with tf.name_scope('b_conv2'):
        b_conv2 = bias_variable([64], name='b_conv2')  # one bias per filter

    # convolve h_pool1 with the kernels and add the biases
    with tf.name_scope('conv2d_2'):
        conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
    with tf.name_scope('h_pool2'):
        h_pool2 = max_pool_2x2(conv2d_2)  # max pooling
    with tf.name_scope('h_pool2_drop'):
        h_pool2 = tf.nn.dropout(h_pool2, keep_prob, name='h_pool2_drop')

with tf.name_scope('Conv3'):
    # weights and biases of the third convolution layer
    with tf.name_scope('W_conv3'):
        W_conv3 = weight_variable([3, 3, 64, 64], name='W_conv3')  # 3x3 kernels, 64 filters over 64 input channels
    with tf.name_scope('b_conv3'):
        b_conv3 = bias_variable([64], name='b_conv3')  # one bias per filter

    # convolve h_pool2 with the kernels and add the biases
    with tf.name_scope('conv2d_3'):
        conv2d_3 = conv2d(h_pool2, W_conv3) + b_conv3
    with tf.name_scope('h_pool3'):
        h_pool3 = max_pool_2x2(conv2d_3)  # max pooling
    with tf.name_scope('h_pool3_drop'):
        h_pool3 = tf.nn.dropout(h_pool3, keep_prob, name='h_pool3_drop')

# A 64x64 image stays 64x64 after the first convolution ('SAME' padding) and becomes 32x32 after the first pooling.
# After the second convolution it is 32x32, then 16x16 after the second pooling.
# After the third convolution it is 16x16, then 8x8 after the third pooling.
# The result is 64 feature maps of size 8x8.

with tf.name_scope('fc1'):
    # weights of the first fully connected layer
    with tf.name_scope('W_fc1'):
        W_fc1 = weight_variable([8 * 8 * 64, 1024], name='W_fc1')  # the previous layer has 8*8*64 neurons, this layer has 1024
    with tf.name_scope('b_fc1'):
        b_fc1 = bias_variable([1024], name='b_fc1')  # 1024 biases

    # flatten the output of the third pooling layer to 1-D
    with tf.name_scope('h_pool3_flat'):
        h_pool3_flat = tf.reshape(h_pool3, [-1, 8 * 8 * 64], name='h_pool3_flat')
    # output of the first fully connected layer
    with tf.name_scope('wx_plus_b1'):
        wx_plus_b1 = tf.matmul(h_pool3_flat, W_fc1) + b_fc1
    with tf.name_scope('relu'):
        # h_fc1 = tf.nn.relu(wx_plus_b1)
        h_fc1 = tf.nn.sigmoid(wx_plus_b1)  # sigmoid is used here instead of ReLU
    with tf.name_scope('h_fc1_drop'):
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='h_fc1_drop')



with tf.name_scope('fc2'):
    # second fully connected layer
    with tf.name_scope('W_fc2'):
        W_fc2 = weight_variable([1024, 10], name='W_fc2')
    with tf.name_scope('b_fc2'):
        b_fc2 = bias_variable([10], name='b_fc2')
    with tf.name_scope('wx_plus_b2'):
        wx_plus_b2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    with tf.name_scope('softmax'):
        # keep the raw logits; tf.nn.softmax_cross_entropy_with_logits applies softmax itself
        # prediction = tf.nn.softmax(wx_plus_b2)
        prediction = wx_plus_b2

# cross-entropy loss
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction),
                                   name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)

# optimize with AdamOptimizer
with tf.name_scope('train'):
    # counts how many batches have been run; starts at 0 and is not trainable
    global_step = tf.Variable(0, trainable=False)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)  # global_step must be passed here, or resuming from a checkpoint breaks

# accuracy
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # boolean vector of per-sample correctness
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))  # argmax returns the position of the largest value in a 1-D tensor
    with tf.name_scope('accuracy'):
        # mean of the boolean vector
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

# merge all summaries
merged = tf.summary.merge_all()


saver = tf.train.Saver(max_to_keep=1)  # keep only the most recent checkpoint



with tf.Session() as sess:
    print("Starting...")
    sess.run(tf.global_variables_initializer())

    # resume from a checkpoint if one exists
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    train_writer = tf.summary.FileWriter('D:/logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('D:/logs/test', sess.graph)

    # load the data
    train_data_npyPath = (r"D:/SXL/npy/Img10_features_train_1.npy")
    train_labels_npyPath = (r"D:/SXL/npy/Img10_labels_train_1.npy")

    test_data_npyPath = (r"D:/SXL/npy/Img10_features_test_1.npy")
    test_labels_npyPath = (r"D:/SXL/npy/Img10_labels_test_1.npy")

    train_data = np.load(train_data_npyPath).astype(np.float32)
    train_labels = np.load(train_labels_npyPath).astype(np.float32)

    test_data = np.load(test_data_npyPath).astype(np.float32)
    test_labels = np.load(test_labels_npyPath).astype(np.float32)

    step = 0
    n_train_batches = max(1, len(train_data) // batch_size)
    n_test_batches = max(1, len(test_data) // batch_size)
    while step < global_steps:
        # train on the next batch, cycling through the training set
        # (the cycling avoids reusing only the first batch once step grows large)
        flag = step % n_train_batches
        batch_xs, batch_ys = train_data[flag * batch_size:(flag + 1) * batch_size], train_labels[flag * batch_size:(flag + 1) * batch_size]  # slice out one mini-batch

        batch_ys_new = InitImagesLabels(batch_ys)  # numeric labels to one-hot vectors [0,0,0,...,1,0,0]
        _, loss, step = sess.run([train_step, cross_entropy, global_step], feed_dict={x: batch_xs, y: batch_ys_new, keep_prob: 0.5})
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys_new, keep_prob: 1.0})
        train_writer.add_summary(summary, step)

        # record summaries on a test batch
        flag2 = step % n_test_batches
        batch_xs, batch_ys = test_data[flag2 * batch_size:(flag2 + 1) * batch_size], test_labels[flag2 * batch_size:(flag2 + 1) * batch_size]  # slice out one mini-batch
        batch_ys_new = InitImagesLabels(batch_ys)  # numeric labels to one-hot vectors
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys_new, keep_prob: 1.0})
        test_writer.add_summary(summary, step)

        if step % 100 == 0:
            saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
            print(step, ":", loss)

            # training-set accuracy, averaged over all full batches
            icount_train_batchsize = int(len(train_data) / batch_size)
            sum_train_acc = 0
            for iNo in range(0, icount_train_batchsize):
                train_batch_xs, train_batch_ys = train_data[iNo * batch_size:(iNo + 1) * batch_size], train_labels[iNo * batch_size:(iNo + 1) * batch_size]
                train_labels_new = InitImagesLabels(train_batch_ys)  # one-hot labels
                train_acc = sess.run(accuracy, feed_dict={x: train_batch_xs, y: train_labels_new, keep_prob: 1.0})
                sum_train_acc = sum_train_acc + train_acc
            sum_train_acc = sum_train_acc / icount_train_batchsize
            print("Iter " + str(step) + ", Training Accuracy=" + str(sum_train_acc))

            # test-set accuracy, averaged over all full batches
            icount_test_batchsize = int(len(test_data) / batch_size)
            sum_test_acc = 0
            for iNo in range(0, icount_test_batchsize):
                test_batch_xs, test_batch_ys = test_data[iNo * batch_size:(iNo + 1) * batch_size], test_labels[iNo * batch_size:(iNo + 1) * batch_size]
                test_labels_new = InitImagesLabels(test_batch_ys)  # one-hot labels
                test_acc = sess.run(accuracy, feed_dict={x: test_batch_xs, y: test_labels_new, keep_prob: 1.0})
                sum_test_acc = sum_test_acc + test_acc
            sum_test_acc = sum_test_acc / icount_test_batchsize
            print("Iter " + str(step) + ", Testing Accuracy=" + str(sum_test_acc))

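Note that decay_rate and decay_steps are defined at the top of the training script but never used: Adam runs with a fixed learning rate throughout. A minimal sketch of how they could be wired in with tf.train.exponential_decay, reusing the global_step variable already defined in the graph (this would replace the fixed-rate train_step above; it is not part of the original script):

decayed_lr = tf.train.exponential_decay(learning_rate,  # initial rate, 0.001
                                        global_step,    # incremented every training step
                                        decay_steps,    # decay every 100 steps
                                        decay_rate,     # multiply the rate by 0.96 each interval
                                        staircase=True)
train_step = tf.train.AdamOptimizer(decayed_lr).minimize(cross_entropy, global_step=global_step)
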
Training results:

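To try a saved checkpoint afterwards, a hypothetical sketch (assuming the graph definitions above are in scope, GetFeature from the extraction script is importable, and some_image is a grayscale digit image loaded with cv_imread):

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        feature = GetFeature(some_image)  # some_image: hypothetical grayscale input
        logits = sess.run(prediction,
                          feed_dict={x: feature[np.newaxis, :], keep_prob: 1.0})
        print('predicted digit:', int(np.argmax(logits, axis=1)[0]))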