4 Convolutional Neural Networks for CIFAR-10 - 4.6 Designing the Session Flow for Model Training and Evaluation

Implementing this network model in TensorFlow follows the flow below (a minimal sketch of the session loop appears right after the list; the full listing follows it):

0. Load the dataset
1. Start the session
2. Train the model epoch by epoch
2.1 Feed the data to the model in batches within each epoch
2.1.1 Run the training op on each batch to train the model
2.1.2 After every few batches, evaluate the current model: compute the loss and accuracy on the training set
3. Evaluate the final model on the test set: loss and accuracy
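Before the full listing, here is a minimal, self-contained sketch of the session flow above (steps 1-3), using a hypothetical one-variable model and toy loop counts in place of the CIFAR-10 network; the names w, loss, train_op here are illustrative only and are not part of the listing below:

import tensorflow as tf

w = tf.Variable(0.0)                                        # toy model parameter (hypothetical)
loss = tf.square(w - 3.0)                                   # toy loss function
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init_op = tf.global_variables_initializer()

with tf.Session() as sess:                                  # 1. start the session
    sess.run(init_op)
    for epoch in range(2):                                  # 2. train epoch by epoch
        for batch_idx in range(5):                          # 2.1 feed the data batch by batch
            _, loss_value = sess.run([train_op, loss])      # 2.1.1 run the training op on this batch
        print('epoch %d, loss %.4f' % (epoch, loss_value))  # 2.1.2 periodic evaluation
    print('final loss: %.4f' % sess.run(loss))              # 3. evaluate the final model

The real listing follows the same skeleton, but fetches image/label batches from the CIFAR-10 input queue and feeds them into placeholders.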

# -*- coding: utf-8 -*-
# A simple convolutional neural network for classifying the CIFAR-10 dataset:
# conv2d + activation + pool + fc
import csv
import os
import sys
import tarfile

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # set before importing tensorflow so it takes effect

import numpy as np
import tensorflow as tf
from six.moves import urllib

import cifar10_input

# Hyperparameters
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10
conv1_kernel_num = 32
conv2_kernel_num = 32
# fc1_units_num = 384
fc1_units_num = 32
fc2_units_num = 32

# Parameters of the input images in the dataset
dataset_dir = '../CIFAR10_dataset'
# image_size = 24
# image_channel = 3
# n_classes = 10  # number of classes in CIFAR-10
num_examples_per_epoch_for_train = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN  # 50000
num_examples_per_epoch_for_eval = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL  # 10000
image_size = cifar10_input.IMAGE_SIZE  # 24
image_channel = 3
n_classes = cifar10_input.NUM_CLASSES  # 10 classes in CIFAR-10

# Download the dataset from the web and store it in the directory specified by data_dir
def maybe_download_and_extract(data_dir):
    """Download and extract the dataset from Alex's website."""
    dest_directory = data_dir
    DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1] #'cifar-10-binary.tar.gz'
    filepath = os.path.join(dest_directory, filename)#'../CIFAR10_dataset\\cifar-10-binary.tar.gz'
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')

    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')#'../CIFAR10_dataset\\cifar-10-batches-bin'
    if not os.path.exists(extracted_dir_path):
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)

def get_distorted_train_batch(data_dir,batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

      Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

      Raises:
        ValueError: If no data_dir
      """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,batch_size=batch_size)
    return images,labels

def get_undistorted_eval_batch(data_dir,eval_data, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.
    Args:
        eval_data: bool, indicating if one should use the train or eval data set.
    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    Raises:
        ValueError: If no data_dir
    """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data,data_dir=data_dir,batch_size=batch_size)
    return images,labels

# Return an initialized weights Variable with the given shape and name
def WeightsVariable(shape, name_str, stddev=0.1):
    # initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Return an initialized biases Variable with the given shape and name
def BiasesVariable(shape, name_str, init_value=0.00001):
    initial = tf.constant(init_value, shape=shape)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Wrapper for a 2-D convolutional layer: activation(conv2d + bias)
def Conv2d(x, W, b, stride=1, padding='SAME',activation=tf.nn.relu,act_name='relu'):
    with tf.name_scope('conv2d_bias'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, b)
    with tf.name_scope(act_name):
        y = activation(y)
    return y

# Wrapper for a 2-D pooling layer
def Pool2d(x, pool= tf.nn.max_pool, k=2, stride=2,padding='SAME'):
    return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding=padding)

# Wrapper for a fully connected layer: activate(Wx + b)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
    with tf.name_scope('Wx_b'):
        y = tf.matmul(x, W)
        y = tf.add(y, b)
    with tf.name_scope(act_name):
        y = activate(y)
    return y

def Inference(image_holder):
    # First convolutional layer: activate(conv2d + bias)
    with tf.name_scope('Conv2d_1'):
        # conv1_kernel_num = 64
        weights = WeightsVariable(shape=[5, 5, image_channel, conv1_kernel_num],
                                  name_str='weights',stddev=5e-2)
        biases = BiasesVariable(shape=[conv1_kernel_num], name_str='biases',init_value=0.0)
        conv1_out = Conv2d(image_holder, weights, biases, stride=1, padding='SAME')

    # First pooling layer (2-D max pool)
    with tf.name_scope('Pool2d_1'):
        pool1_out = Pool2d(conv1_out, pool=tf.nn.max_pool, k=3, stride=2,padding='SAME')

    # Second convolutional layer: activate(conv2d + bias)
    with tf.name_scope('Conv2d_2'):
        # conv2_kernels_num = 64
        weights = WeightsVariable(shape=[5, 5, conv1_kernel_num, conv2_kernel_num],
                                  name_str='weights', stddev=5e-2)
        biases = BiasesVariable(shape=[conv2_kernel_num], name_str='biases', init_value=0.0)
        conv2_out = Conv2d(pool1_out, weights, biases, stride=1, padding='SAME')

    # Second pooling layer (2-D max pool)
    with tf.name_scope('Pool2d_2'):
        pool2_out = Pool2d(conv2_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME')

    # Reshape the 2-D feature maps into 1-D feature vectors
    with tf.name_scope('FeatsReshape'):
        features = tf.reshape(pool2_out, [batch_size,-1])
        feats_dim = features.get_shape()[1].value

    # First fully connected layer
    with tf.name_scope('FC1_nonlinear'):
        # fc1_units_num = 384
        weights = WeightsVariable(shape=[feats_dim, fc1_units_num],
                                  name_str='weights',stddev=4e-2)
        biases = BiasesVariable(shape=[fc1_units_num], name_str='biases',init_value=0.1)
        fc1_out = FullyConnected(features, weights, biases, activate=tf.nn.relu, act_name='relu')

    # Second fully connected layer
    with tf.name_scope('FC2_nonlinear'):
        # fc2_units_num = 192
        weights = WeightsVariable(shape=[fc1_units_num, fc2_units_num],
                                  name_str='weights',stddev=4e-2)
        biases = BiasesVariable(shape=[fc2_units_num], name_str='biases',init_value=0.1)
        fc2_out = FullyConnected(fc1_out, weights, biases,activate=tf.nn.relu, act_name='relu')

    # Third fully connected layer (linear output, i.e. logits)
    with tf.name_scope('FC3_linear'):
        fc3_units_num = n_classes
        weights = WeightsVariable(shape=[fc2_units_num, fc3_units_num],
                                  name_str='weights',stddev=1.0/fc2_units_num)
        biases = BiasesVariable(shape=[fc3_units_num], name_str='biases',init_value=0.0)
        logits = FullyConnected(fc2_out, weights, biases,activate=tf.identity, act_name='linear')
    return logits

def TrainModel():
    # Build the computation graph using the functions defined above
    with tf.Graph().as_default():

        # Graph inputs
        with tf.name_scope('Inputs'):
            image_holder = tf.placeholder(tf.float32, [batch_size, image_size,image_size,image_channel], name='images')
            labels_holder = tf.placeholder(tf.int32, [batch_size], name='labels')

        # Forward inference
        with tf.name_scope('Inference'):
            logits = Inference(image_holder)

        # Loss layer
        with tf.name_scope('Loss'):
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_holder,logits=logits)
            cross_entropy_mean = tf.reduce_mean(cross_entropy)
            total_loss = cross_entropy_mean

        # Optimizer / training layer
        with tf.name_scope('Train'):
            learning_rate = tf.placeholder(tf.float32)
            global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64)
            optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
            train_op = optimizer.minimize(total_loss,global_step=global_step)

        # Evaluation layer
        with tf.name_scope('Evaluate'):
            top_K_op = tf.nn.in_top_k(predictions=logits,targets=labels_holder,k=1)

        # Node that fetches a batch of training samples
        with tf.name_scope('GetTrainBatch'):
            image_train,labels_train = get_distorted_train_batch(data_dir=dataset_dir,batch_size=batch_size)

        # Node that fetches a batch of test samples
        with tf.name_scope('GetTestBatch'):
            image_test, labels_test = get_undistorted_eval_batch(eval_data=True,data_dir=dataset_dir, batch_size=batch_size)

        # Op that initializes all variables
        init_op = tf.global_variables_initializer()

        print('Writing the graph to an event file; view it in TensorBoard')
        graph_writer = tf.summary.FileWriter(logdir='logs', graph=tf.get_default_graph())
        graph_writer.close()

        # Collect evaluation results for saving to a file
        results_list = list()

        # Record the hyperparameter configuration
        results_list.append(['learning_rate', learning_rate_init,
                             'training_epochs', training_epochs,
                             'batch_size', batch_size,
                             'conv1_kernel_num', conv1_kernel_num,
                             'conv2_kernel_num', conv2_kernel_num,
                             'fc1_units_num', fc1_units_num,
                             'fc2_units_num', fc2_units_num])
        results_list.append(['train_step', 'train_loss','train_step', 'train_accuracy'])

        with tf.Session() as sess:
            sess.run(init_op)
            print('===>>>>>>>== Start training the model on the training set ==<<<<<<<=====')
            total_batches = int(num_examples_per_epoch_for_train / batch_size)
            print('Per batch Size:', batch_size)
            print('Train sample Count Per Epoch:', num_examples_per_epoch_for_train)
            print('Total batch Count Per Epoch:', total_batches)

            # Start the data-reading queue runners
            tf.train.start_queue_runners()
            # Track the number of steps the model has been trained for
            training_step = 0
            # Train for the specified number of epochs; each epoch covers num_examples_per_epoch_for_train samples
            for epoch in range(training_epochs):
                # Run through all the batches in each epoch
                for batch_idx in range(total_batches):
                    # Run the input graph to fetch one batch of training data
                    images_batch ,labels_batch = sess.run([image_train,labels_train])
                    # Run the optimizer's training op
                    _,loss_value = sess.run([train_op,total_loss],
                                            feed_dict={image_holder:images_batch,
                                                       labels_holder:labels_batch,
                                                       learning_rate:learning_rate_init})
                    # global_step increases by 1 on each call of the training op, ending at training_epochs * total_batches
                    training_step = sess.run(global_step)
                    # Every display_step steps, compute the current model's loss and classification accuracy
                    if training_step % display_step == 0:
                        # Run the accuracy node to compute the accuracy on the current training batch
                        predictions = sess.run([top_K_op],
                                               feed_dict={image_holder:images_batch,
                                                          labels_holder:labels_batch})
                        # Accuracy on the current batch (correct predictions / batch_size)
                        batch_accuracy = np.sum(predictions)/batch_size
                        results_list.append([training_step,loss_value,training_step,batch_accuracy])
                        print("Training Step:" + str(training_step) +
                              ",Training Loss = " + "{:.6f}".format(loss_value) +
                              ",Training Accuracy = " + "{:.5f}".format(batch_accuracy) )
            print('Training finished')

            print('===>>>>>>>== Start evaluating the model on the test set ==<<<<<<<=====')
            total_batches = int(num_examples_per_epoch_for_eval / batch_size)
            total_examples = total_batches * batch_size
            print('Per batch Size:', batch_size)
            print('Test sample Count Per Epoch:', total_examples)
            print('Total batch Count Per Epoch:', total_batches)
            correct_predicted = 0
            for test_step in range(total_batches):
                # Run the input graph to fetch one batch of test data
                images_batch,labels_batch = sess.run([image_test,labels_test])
                # Run the accuracy node to compute the accuracy on the current test batch
                predictions = sess.run([top_K_op],
                                       feed_dict={image_holder:images_batch,
                                                  labels_holder:labels_batch})
                # Accumulate the number of correctly predicted samples across batches
                correct_predicted += np.sum(predictions)

            accuracy_score = correct_predicted / total_examples
            print('---------->Accuracy on Test Examples:',accuracy_score)
            results_list.append(['Accuracy on Test Examples:',accuracy_score])
            # Save the evaluation results to a CSV file
            with open('evaluate_results.csv', 'w', newline='') as results_file:
                csv_writer = csv.writer(results_file, dialect='excel')
                for row in results_list:
                    csv_writer.writerow(row)

def main(argv=None):
    maybe_download_and_extract(data_dir=dataset_dir)
    train_dir='train/'
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    TrainModel()

if __name__ =='__main__':
    tf.app.run()

Output:

Writing the graph to an event file; view it in TensorBoard
===>>>>>>>== Start training the model on the training set ==<<<<<<<=====
Per batch Size: 100
Train sample Count Per Epoch: 50000
Total batch Count Per Epoch: 500
Training Step:10,Training Loss = 2.306586,Training Accuracy = 0.06000
Training Step:20,Training Loss = 2.306300,Training Accuracy = 0.06000
Training Step:30,Training Loss = 2.303389,Training Accuracy = 0.10000
Training Step:40,Training Loss = 2.302423,Training Accuracy = 0.09000
Training Step:50,Training Loss = 2.302983,Training Accuracy = 0.14000
Training Step:60,Training Loss = 2.301472,Training Accuracy = 0.14000
Training Step:70,Training Loss = 2.306488,Training Accuracy = 0.08000
Training Step:80,Training Loss = 2.303758,Training Accuracy = 0.17000
Training Step:90,Training Loss = 2.301871,Training Accuracy = 0.19000
Training Step:100,Training Loss = 2.299527,Training Accuracy = 0.12000
Training Step:110,Training Loss = 2.298491,Training Accuracy = 0.15000
Training Step:120,Training Loss = 2.286592,Training Accuracy = 0.20000
Training Step:130,Training Loss = 2.202128,Training Accuracy = 0.22000
Training Step:140,Training Loss = 2.026917,Training Accuracy = 0.25000
Training Step:150,Training Loss = 2.103927,Training Accuracy = 0.22000
Training Step:160,Training Loss = 2.094987,Training Accuracy = 0.25000
Training Step:170,Training Loss = 1.953211,Training Accuracy = 0.32000
Training Step:180,Training Loss = 1.974621,Training Accuracy = 0.27000
Training Step:190,Training Loss = 1.944889,Training Accuracy = 0.29000
Training Step:200,Training Loss = 1.896667,Training Accuracy = 0.32000
Training Step:210,Training Loss = 2.002789,Training Accuracy = 0.25000
Training Step:220,Training Loss = 2.057821,Training Accuracy = 0.29000
Training Step:230,Training Loss = 1.891200,Training Accuracy = 0.33000
Training Step:240,Training Loss = 1.877370,Training Accuracy = 0.33000
Training Step:250,Training Loss = 1.904065,Training Accuracy = 0.34000
Training Step:260,Training Loss = 1.795470,Training Accuracy = 0.41000
Training Step:270,Training Loss = 1.956472,Training Accuracy = 0.35000
Training Step:280,Training Loss = 1.893711,Training Accuracy = 0.30000
Training Step:290,Training Loss = 1.737094,Training Accuracy = 0.34000
Training Step:300,Training Loss = 1.759201,Training Accuracy = 0.43000
Training Step:310,Training Loss = 2.055783,Training Accuracy = 0.33000
Training Step:320,Training Loss = 1.666109,Training Accuracy = 0.42000
Training Step:330,Training Loss = 1.816121,Training Accuracy = 0.32000
Training Step:340,Training Loss = 1.806642,Training Accuracy = 0.41000
Training Step:350,Training Loss = 1.779170,Training Accuracy = 0.35000
Training Step:360,Training Loss = 1.755931,Training Accuracy = 0.45000
Training Step:370,Training Loss = 1.692869,Training Accuracy = 0.43000
Training Step:380,Training Loss = 1.975068,Training Accuracy = 0.34000
Training Step:390,Training Loss = 1.735186,Training Accuracy = 0.42000
Training Step:400,Training Loss = 1.651298,Training Accuracy = 0.43000
Training Step:410,Training Loss = 1.725129,Training Accuracy = 0.45000
Training Step:420,Training Loss = 1.673964,Training Accuracy = 0.43000
Training Step:430,Training Loss = 1.848086,Training Accuracy = 0.37000
Training Step:440,Training Loss = 1.689408,Training Accuracy = 0.41000
Training Step:450,Training Loss = 1.647934,Training Accuracy = 0.41000
Training Step:460,Training Loss = 1.587230,Training Accuracy = 0.43000
Training Step:470,Training Loss = 1.714040,Training Accuracy = 0.47000
Training Step:480,Training Loss = 1.771270,Training Accuracy = 0.49000
Training Step:490,Training Loss = 1.757897,Training Accuracy = 0.38000
Training Step:500,Training Loss = 1.656613,Training Accuracy = 0.41000
Training finished
===>>>>>>>== Start evaluating the model on the test set ==<<<<<<<=====
Per batch Size: 100
Test sample Count Per Epoch: 10000
Total batch Count Per Epoch: 100
---------->Accuracy on Test Examples: 0.4365
