CIFAR-10 + Convolutional Neural Network + TensorBoard in Practice

1. Introduction to the CIFAR-10 Dataset

CIFAR-10 is a small object-recognition dataset compiled by students of Hinton. It contains RGB color images in 10 classes; each image is 32x32 pixels, with 50,000 training images and 10,000 test images.

Project code: based on the tutorial code that TensorFlow provides (tensorflow/models, tutorials/image/cifar10).

Running cifar10_download.py downloads the complete CIFAR-10 dataset.
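Before touching the TensorFlow pipeline, it can help to see what the downloaded files actually contain. Below is a minimal sketch (not part of the tutorial code; it assumes data_batch_1.bin has been unpacked into ./cifar-10-batches-bin) that reads one record of the binary version, where each record is 1 label byte followed by 3072 pixel bytes stored as three 32x32 channel planes (R, G, B):

import numpy as np

# Read a single record of the CIFAR-10 binary format
record_bytes = 1 + 32 * 32 * 3                         # 1 label byte + 3072 pixel bytes
with open('./cifar-10-batches-bin/data_batch_1.bin', 'rb') as f:
    raw = np.frombuffer(f.read(record_bytes), dtype=np.uint8)
label = int(raw[0])                                    # class id in [0, 9]
image = raw[1:].reshape(3, 32, 32).transpose(1, 2, 0)  # CHW planes -> HWC image
print(label, image.shape)                              # e.g. 6 (32, 32, 3)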

 

2. Straight to the Code

(1) Import packages and set the model parameters

from tensorflow.models.tutorials.image.cifar10 import cifar10
from tensorflow.models.tutorials.image.cifar10 import cifar10_input

import tensorflow as tf
import numpy as np
import math
import time

max_steps = 6000                      # number of training steps
batch_size = 128                      # size of one batch
data_dir = './cifar-10-batches-bin'   # directory holding the data files
LOG_DIR = './LOG'

(2) Define helper functions

def variable_with_weight_loss(shape, stddev, w1):
    # Create a weight variable; if w1 is set, add its L2 penalty
    # (scaled by w1) to the 'losses' collection for weight decay
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if w1 is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var

def loss(logits, labels):
    # Cross-entropy plus all accumulated weight-decay terms
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
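Note the pattern these two helpers set up: variable_with_weight_loss pushes each layer's L2 penalty into the 'losses' collection, and loss() later sums every entry of that collection together with the mean cross-entropy via tf.add_n. The convolutional layers below pass w1=0.0, so their contribution is zero, while the two fully connected layers use w1=0.004 and actually get regularized.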

TensorBoard parameter summaries: variable_summaries attaches mean, standard deviation, max, and min scalar summaries plus a histogram to any tensor it is given.

# Parameter summaries
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)

Load the training data. cifar10_input.distorted_inputs is an easy place to hit bugs, because its definition differs between versions of the tutorial code.

# Download the CIFAR-10 dataset. If this does not work, download the
# CIFAR-10 binary version from http://www.cs.toronto.edu/~kriz/cifar.html
# manually and unpack it into the data directory.
# cifar10.maybe_download_and_extract()

# Get the training images and labels.
# print(images_train) shows a tensor of shape [128, 24, 24, 3].
images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)
# Get the test images and labels.
images_test, labels_test = cifar10_input.\
    inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
# Why distorted_inputs for training but inputs for testing? Go to the
# definitions: distorted_inputs applies data augmentation, inputs does not.
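For reference, the per-image augmentation inside distorted_inputs looks roughly like the sketch below (parameter values follow the TF tutorial source and may differ between versions; treat them as indicative), while inputs only performs a central crop plus the same standardization:

# Rough sketch of the augmentation distorted_inputs applies to each image
image = tf.random_uniform([32, 32, 3])                       # stand-in for one decoded 32x32 image
distorted = tf.random_crop(image, [24, 24, 3])               # random 24x24 crop
distorted = tf.image.random_flip_left_right(distorted)       # random horizontal flip
distorted = tf.image.random_brightness(distorted, max_delta=63)
distorted = tf.image.random_contrast(distorted, lower=0.2, upper=1.8)
float_image = tf.image.per_image_standardization(distorted)  # zero mean, unit variance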

Define the input placeholders

# Placeholders for the input data. label_holder is needed later
# by the loss and by tf.nn.in_top_k.
with tf.name_scope('input_holder'):
    image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
    label_holder = tf.placeholder(tf.int32, [batch_size])

The first convolutional layer: 3x3 kernels, 3 input channels, 32 filters (the following layers are analogous).

with tf.name_scope('conv1'):
    # Extra name_scopes keep the TensorBoard graph readable
    # and make the code clearer too
    with tf.name_scope('weight1'):  # weights
        weight1 = variable_with_weight_loss(shape=[3, 3, 3, 32], stddev=5e-2, w1=0.0)
        # Record this variable with TensorBoard
        variable_summaries(weight1)
    kernel1 = tf.nn.conv2d(image_holder, weight1, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias1'):  # bias
        bias1 = tf.Variable(tf.constant(0.0, shape=[32]))
        variable_summaries(bias1)
    with tf.name_scope('forward1'):  # result of the forward pass through this layer
        conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))  # add the bias via bias_add rather than a bare +
with tf.name_scope('conv2'):
    with tf.name_scope('weight2'):
        weight2 = variable_with_weight_loss(shape=[3, 3, 32, 32], stddev=5e-2, w1=0.0)
        variable_summaries(weight2)
    kernel2 = tf.nn.conv2d(conv1, weight2, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias2'):
        bias2 = tf.Variable(tf.constant(0.1, shape=[32]))
        variable_summaries(bias2)
    with tf.name_scope('forward2'):
        conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
with tf.name_scope('norm_pool'):
    with tf.name_scope('LRN'):
        norm1 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)

with tf.name_scope('conv3'):
    with tf.name_scope('weight3'):
        weight3 = variable_with_weight_loss(shape=[3, 3, 32, 64], stddev=5e-2, w1=0.0)
        variable_summaries(weight3)
    kernel3 = tf.nn.conv2d(norm1, weight3, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias3'):
        bias3 = tf.Variable(tf.constant(0.1, shape=[64]))
        variable_summaries(bias3)
    with tf.name_scope('forward3'):
        conv3 = tf.nn.relu(tf.nn.bias_add(kernel3, bias3))
with tf.name_scope('conv4'):
    with tf.name_scope('weight4'):
        weight4 = variable_with_weight_loss(shape=[3, 3, 64, 64], stddev=5e-2, w1=0.0)
        variable_summaries(weight4)
    kernel4 = tf.nn.conv2d(conv3, weight4, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias4'):
        bias4 = tf.Variable(tf.constant(0.1, shape=[64]))
        variable_summaries(bias4)
    with tf.name_scope('forward4'):
        conv4 = tf.nn.relu(tf.nn.bias_add(kernel4, bias4))
with tf.name_scope('pool1'):
    # ksize and stride deliberately differ here, just for variety
    pool1 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 3, 3, 1], padding='SAME')

with tf.name_scope('conv5'):
    with tf.name_scope('weight5'):
        weight5 = variable_with_weight_loss(shape=[3, 3, 64, 128], stddev=5e-2, w1=0.0)
        variable_summaries(weight5)
    kernel5 = tf.nn.conv2d(pool1, weight5, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias5'):
        bias5 = tf.Variable(tf.constant(0.1, shape=[128]))
        variable_summaries(bias5)
    with tf.name_scope('forward5'):
        conv5 = tf.nn.relu(tf.nn.bias_add(kernel5, bias5))
with tf.name_scope('conv6'):
    with tf.name_scope('weight6'):
        weight6 = variable_with_weight_loss(shape=[3, 3, 128, 128], stddev=5e-2, w1=0.0)
        variable_summaries(weight6)
    kernel6 = tf.nn.conv2d(conv5, weight6, strides=[1, 1, 1, 1], padding='SAME')
    with tf.name_scope('bias6'):
        bias6 = tf.Variable(tf.constant(0.1, shape=[128]))
        variable_summaries(bias6)
    with tf.name_scope('forward6'):
        conv6 = tf.nn.relu(tf.nn.bias_add(kernel6, bias6))

with tf.name_scope('pool2'):
    # ksize and stride deliberately differ here, just for variety
    pool2 = tf.nn.max_pool(conv6, ksize=[1, 2, 2, 1], strides=[1, 3, 3, 1], padding='SAME')

with tf.name_scope('keep_prob'):
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')


# Fully connected layers
with tf.name_scope('fnn1'):
    reshape = tf.reshape(pool2, [batch_size, -1])
    dim = reshape.get_shape()[1].value
    with tf.name_scope('weight7'):
        weight7 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, w1=0.004)
        variable_summaries(weight7)
    with tf.name_scope('bias7'):
        bias7 = tf.Variable(tf.constant(0.1, shape=[384]))
        variable_summaries(bias7)
    local7 = tf.nn.relu(tf.matmul(reshape, weight7) + bias7)

    # Apply dropout using keep_prob
    local7_drop = tf.nn.dropout(local7, keep_prob, name='h_fc1_drop')

with tf.name_scope('fnn2'):
    with tf.name_scope('weight8'):
        weight8 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, w1=0.004)
    with tf.name_scope('bias8'):
        bias8 = tf.Variable(tf.constant(0.1, shape=[192]))
    local8 = tf.nn.relu(tf.matmul(local7_drop, weight8) + bias8)
    local8_drop = tf.nn.dropout(local8, keep_prob, name='h_fc2_drop')
with tf.name_scope('inference'):
    with tf.name_scope('weight9'):
        weight9 = variable_with_weight_loss(shape=[192, 10], stddev=1/192.0, w1=0.0)
    with tf.name_scope('bias9'):
        bias9 = tf.Variable(tf.constant(0.0, shape=[10]))
    logits = tf.add(tf.matmul(local8_drop, weight9), bias9)

The layers above are a few convolutional and pooling layers I defined fairly casually. In general, given enough data, deeper networks reach higher accuracy (the main goal here is learning to build a network, so no special tricks are used). The shape walk-through below shows how the sizes evolve.
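To see where the fully connected layer's input dimension comes from, here is the shape arithmetic for the stack above (SAME padding; a stride-3 pool gives ceil(H/3)):

# input:             [128, 24, 24,   3]
# conv1, conv2:      [128, 24, 24,  32]   stride 1 + SAME keeps H and W
# conv3, conv4:      [128, 24, 24,  64]
# pool1 (stride 3):  [128,  8,  8,  64]   ceil(24 / 3) = 8
# conv5, conv6:      [128,  8,  8, 128]
# pool2 (stride 3):  [128,  3,  3, 128]   ceil(8 / 3) = 3
# flattened dim:     3 * 3 * 128 = 1152   -> this is what reshape/dim pick up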


with tf.name_scope('loss_func'):
    # Total loss: cross-entropy plus weight decay. Use a new name
    # so we do not shadow the loss() function defined above.
    total_loss = loss(logits, label_holder)
    tf.summary.scalar('loss', total_loss)


######## Accuracy: CIFAR labels are not one-hot, so the usual argmax comparison does not apply
'''
with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # Store the results in a list of booleans
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label_holder, 1))  # argmax returns the index of the largest entry along an axis
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)
'''

Define the optimizer. tf.nn.in_top_k checks whether the predicted result matches the actual label, returning a boolean tensor.

with tf.name_scope('train_step'):
    global_step = tf.train.get_or_create_global_step()
    # Use the Adam optimizer; the learning rate is fixed here,
    # but you could try a decay schedule
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss, global_step=global_step)
    # tf.nn.in_top_k checks whether the true label is among the top-k
    # (here top-1) predictions, returning a boolean tensor
    top_k_op = tf.nn.in_top_k(logits, label_holder, 1)
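If in_top_k is unfamiliar, this toy snippet (made-up values, separate from the model) shows its semantics: for each row it asks whether the true label index is among the top-k logits:

# Toy illustration of tf.nn.in_top_k with hypothetical values
logits_demo = tf.constant([[0.1, 0.8, 0.1],
                           [0.6, 0.3, 0.1]])
labels_demo = tf.constant([1, 2])
with tf.Session() as demo_sess:
    print(demo_sess.run(tf.nn.in_top_k(logits_demo, labels_demo, 1)))
# -> [ True False ]  (row 0's top-1 is index 1, row 1's is index 0)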

Create the session and start training

# Create the session
sess = tf.InteractiveSession()
# Initialize all variables
tf.global_variables_initializer().run()
# Merge all the summaries
merged = tf.summary.merge_all()
# Write the log files into LOG_DIR
train_writer = tf.summary.FileWriter(LOG_DIR, sess.graph)
# The input pipeline reads data on background threads; start them here
tf.train.start_queue_runners()
# Start the training iterations
for step in range(max_steps):
    start_time = time.time()
    image_batch, label_batch = sess.run([images_train, labels_train])
    summary, _, loss_value = sess.run([merged, train_op, total_loss],
                                      feed_dict={image_holder: image_batch,
                                                 label_holder: label_batch,
                                                 keep_prob: 0.5})
    # Record a summary at every step
    train_writer.add_summary(summary, step)
    duration = time.time() - start_time
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        # Time to train one batch
        sec_per_batch = float(duration)
        format_str = ('step %d, loss=%.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))

        # Evaluate on the test set
        num_examples = 10000
        num_iter = int(math.ceil(num_examples / batch_size))
        true_count = 0
        total_sample_count = num_iter * batch_size
        # Use a separate counter so the outer loop's step is not clobbered
        eval_step = 0
        while eval_step < num_iter:
            image_batch, label_batch = sess.run([images_test, labels_test])
            predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch,
                                                          label_holder: label_batch,
                                                          keep_prob: 1.0})
            true_count += np.sum(predictions)
            eval_step += 1
        precision = true_count / total_sample_count
        # Build the accuracy summary by hand; calling tf.summary.scalar here
        # would keep adding new ops to the graph on every evaluation
        acc_summary = tf.Summary(value=[tf.Summary.Value(tag='test_accuracy',
                                                         simple_value=precision)])
        train_writer.add_summary(acc_summary, step)
        print('precision = %.3f' % precision)
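Once a few summaries have been written, point TensorBoard at the log directory and open http://localhost:6006 in a browser to inspect the graph, the loss curve, and the per-variable statistics:

tensorboard --logdir=./LOG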

 

 

The model is fairly simple and uses no tricks; the final accuracy is roughly 76% (a follow-up will use TF 2.0 to build more complex networks).
