TensorFlow: recognizing the CIFAR-10 dataset with a convolutional neural network

Reference links: http://wiki.jikexueyuan.com/project/tensorflow-zh/tutorials/deep_cnn.html

http://blog.csdn.net/zeuseign/article/details/72773342

First, download cifar10.py and cifar10_input.py from GitHub. These two files are used to download the CIFAR-10 dataset and to produce the corresponding training and evaluation batches.

Download address: https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10
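On the first run, the dataset has to be downloaded and extracted once. A minimal sketch (the tutorial's cifar10.py stores the data under /tmp/cifar10_data by default; adjust its data_dir flag if you keep it elsewhere, such as the cifar10_data/ directory used in the script below):

import cifar10

# One-time download and extraction of the CIFAR-10 binary version (~162 MB)
cifar10.maybe_download_and_extract()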

A few points worth highlighting:

(1) Reference blog: http://blog.csdn.net/uestc_c2_403/article/details/72415791

tf.add_to_collection: puts a variable into a named collection, i.e. gathers many variables into a single list.

tf.get_collection: retrieves everything stored in a collection, returned as a list.

tf.add_n: sums all the tensors in a list element-wise. A minimal sketch of the three calls working together follows.
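A self-contained example (the collection name 'my_losses' is arbitrary, chosen here just for illustration):

import tensorflow as tf

a = tf.constant(1.0)
b = tf.constant(2.0)
# Gather tensors into a named collection
tf.add_to_collection('my_losses', a)
tf.add_to_collection('my_losses', b)
# Fetch the whole collection back as a list and sum it element-wise
total = tf.add_n(tf.get_collection('my_losses'))

with tf.Session() as sess:
    print(sess.run(total))  # 3.0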

(2) tf.nn.in_top_k is mainly used to check whether the predicted results match the actual labels; it returns a tensor of type bool.

tf.nn.in_top_k(prediction, target, K): prediction is the predicted output, with shape [number of samples, output dimension] and a dtype such as tf.float32. target holds the actual class labels, one per sample. K means that, for each sample, we test whether the true label in target appears among the K largest entries of the prediction; K is usually 1.
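A tiny standalone demo (made-up scores; two samples, three classes):

import tensorflow as tf

prediction = tf.constant([[0.1, 0.8, 0.1],   # highest score at index 1
                          [0.4, 0.3, 0.3]])  # highest score at index 0
target = tf.constant([1, 2])

with tf.Session() as sess:
    # Sample 0: label 1 is the top-1 prediction -> True
    # Sample 1: label 2 is not the top-1 prediction -> False
    print(sess.run(tf.nn.in_top_k(prediction, target, 1)))  # [ True False]

The complete training script: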

import cifar10
import cifar10_input
import tensorflow as tf
import numpy as np
import time
import matplotlib.pyplot as plt

# Download and extract the data (only needed on the first run)
# cifar10.maybe_download_and_extract()
max_steps = 1000
batch_size = 128
data_dir = 'cifar10_data/cifar-10-batches-bin'


def variable_with_weight_loss(shape, stddev, wl):
    """Create a weight variable; if wl is given, add an L2 weight-decay term to the 'losses' collection."""
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if wl is not None:
        # L2 penalty scaled by wl; collected so loss() can sum it into the total loss
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var


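# distorted_inputs applies training-time augmentation: random 24x24 crops, flips, brightness/contrast jitter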
images_train, labels_train = cifar10_input.distorted_inputs(
    data_dir=data_dir, batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(
    eval_data=True, data_dir=data_dir, batch_size=batch_size)

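# Placeholders; the cifar10_input pipeline emits 24x24x3 crops of the original 32x32 images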
image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
labels_holder = tf.placeholder(tf.int32, [batch_size])

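# Convolution layer 1: 5x5 kernel over 3 channels, 64 filters; ReLU, 3x3/2 max-pool, then LRN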
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=0.05, wl=0.0)
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
norm1 = tf.nn.lrn(pool1, bias=1.0, alpha=0.001/9.0, beta=0.75)

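# Convolution layer 2: 5x5 kernel, 64 filters; note that LRN comes before pooling here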
weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=0.05, wl=0.0)
kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
bias2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, bias=1.0, alpha=0.001/9.0, beta=0.75)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

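# Fully connected layers: flatten pool2, then 384- and 192-unit hidden layers with L2 weight decay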
reshape = tf.reshape(pool2, [batch_size, -1])
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, wl=0.004)
bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)

weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, wl=0.004)
bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)

weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1/192.0, wl=0.0)
bias5 = tf.Variable(tf.constant(0.0, shape=[10]))
logits = tf.matmul(local4, weight5) + bias5


def loss(logits, labels):
    """Total loss: mean softmax cross-entropy plus every weight-decay term in 'losses'."""
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    tf.add_to_collection('losses', cross_entropy_mean)

    # Sum the cross-entropy and every weight-decay term added by variable_with_weight_loss
    return tf.add_n(tf.get_collection('losses'), name='total_loss')


total_loss = loss(logits, labels_holder)
train_op = tf.train.AdamOptimizer(0.003).minimize(total_loss)
top_k_op = tf.nn.in_top_k(logits, labels_holder, 1)

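# Create the session, initialize variables, and start the queue threads that feed the input pipeline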
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.train.start_queue_runners()

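# Training loop; every 5 steps, record elapsed time and accuracy on the current training batch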
timex = []
result = []
start_time = time.time()
for step in range(max_steps):
    image_batch, label_batch = sess.run([images_train, labels_train])
    sess.run(train_op, feed_dict={image_holder: image_batch, labels_holder: label_batch})
    if step % 5 == 0:
        predictions = sess.run(top_k_op, feed_dict={image_holder: image_batch, labels_holder: label_batch})
        accuracy = np.sum(predictions) / batch_size
        print(accuracy)
        timex.append(time.time() - start_time)
        result.append(accuracy)

plt.plot(timex, result, marker='o')
plt.xlabel('time (s); 5 training steps per interval')
plt.ylabel('accuracy')
plt.title('CIFAR-10 CNN accuracy vs. training iterations')
plt.show()

# Evaluate on the test set
num_examples = 10000
true_count = 0
step = 1
# Whole batches only: 78 batches x 128 = 9984 of the 10000 test images
while step * batch_size < num_examples:
    image_batch, label_batch = sess.run([images_test, labels_test])
    predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch,
                                                  labels_holder: label_batch})
    true_count += np.sum(predictions)
    step += 1

precision = true_count / ((step - 1) * batch_size)
print('precision: %0.3f' % precision)

