TensorFlow (2): CIFAR-10

Downloading the dataset

import tensorflow as tf 

import cifar10 




# FLAGS is TensorFlow's global container for command-line flags
FLAGS = tf.app.flags.FLAGS

FLAGS.data_dir = 'cifar10_data/'

# Download and extract the CIFAR-10 dataset if it is not already present

cifar10.maybe_download_and_extract()
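After maybe_download_and_extract() returns, the binary batch files live under cifar10_data/cifar-10-batches-bin (the same path the training script uses below). A quick sanity check, as a minimal sketch assuming the download succeeded:

import os

print(os.listdir('cifar10_data/cifar-10-batches-bin'))
# Expect data_batch_1.bin ... data_batch_5.bin plus test_batch.bin and batches.meta.txt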


Training

import cifar10
import cifar10_input
import tensorflow as tf 
import numpy as np 
import time   

max_steps = 3000
batch_size = 128

data_dir = 'cifar10_data/cifar-10-batches-bin'


# Create a weight variable; if w1 is given, add an L2 regularization
# term (w1 * l2_loss) to the 'losses' collection so it can be summed
# into the total loss later.
def variable_with_weight_loss(shape, stddev, w1):
	var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
	if w1 is not None:
		weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')
		tf.add_to_collection('losses', weight_loss)
	return var
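To see the mechanism concretely: with w1 set, the helper pushes a weight-decay term into the 'losses' collection, and loss() further down sums everything in that collection via tf.add_n. A minimal illustration (the variable and values here are made up, not part of the network):

# Illustrative only: adds 0.01 * l2_loss(w) to the 'losses' collection
w = variable_with_weight_loss(shape=[8, 4], stddev=0.1, w1=0.01)
reg_terms = tf.get_collection('losses')  # the weight-decay term is now in here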

# Data augmentation: distorted_inputs randomly crops each image to 24x24,
# flips it horizontally, and jitters brightness/contrast
image_train,labels_train = cifar10_input.distorted_inputs(data_dir=data_dir,batch_size=batch_size)


# Evaluation inputs: center-cropped 24x24 test images, no augmentation
image_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)

image_holder = tf.placeholder(tf.float32,[batch_size,24,24,3])
label_holder = tf.placeholder(tf.int32,[batch_size])

# Convolutional layers


# conv1: 5x5 convolution, 3 input channels -> 64 feature maps
weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, w1=0.0)

conv = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
biases1 = tf.Variable(tf.constant(0.0,shape=[64]))
pre_activation = tf.nn.bias_add(conv, biases1) 
conv1 = tf.nn.relu(pre_activation)

# Max pooling and local response normalization
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
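tf.nn.lrn is local response normalization across the channel dimension: each activation is divided by (bias + alpha * sum of squared activations over its depth_radius=4 channel neighbors) raised to the power beta. A minimal NumPy sketch of the same formula at a single spatial position (illustrative, not from the post):

import numpy as np

def lrn_at_one_position(a, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75):
	# a: float array of activations across channels at one (batch, y, x) position
	out = np.empty_like(a)
	for c in range(len(a)):
		lo, hi = max(0, c - depth_radius), min(len(a), c + depth_radius + 1)
		out[c] = a[c] / (bias + alpha * np.sum(a[lo:hi] ** 2)) ** beta
	return out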


# conv2: 5x5 convolution, 64 -> 64 feature maps (here LRN comes before pooling)
weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, w1=0.0)
conv_2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
biases2 = tf.Variable(tf.constant(0.1,shape=[64]))
pre_activation_2 = tf.nn.bias_add(conv_2, biases2)
conv2 = tf.nn.relu(pre_activation_2)
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Pooling
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],padding='SAME')

# Fully connected layers

# FC layer 1: flatten the feature maps and project to 384 units
reshape = tf.reshape(pool2,[batch_size,-1])
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, w1=0.004)
biases3 = tf.Variable(tf.constant(0.1,shape=[384]))
local3 = tf.nn.relu(tf.matmul(reshape,weight3)+biases3)

# FC layer 2: 384 -> 192 units
weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, w1=0.004)
biases4 = tf.Variable(tf.constant(0.1,shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3,weight4)+biases4)

# Output layer: raw logits for the 10 classes
weight5 = variable_with_weight_loss(shape=[192, 10], stddev=0.04, w1=0.004)
biases5 = tf.Variable(tf.constant(0.0,shape=[10]))
logits = tf.nn.bias_add(tf.matmul(local4,weight5),biases5)
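No softmax is applied to the logits here: tf.nn.sparse_softmax_cross_entropy_with_logits in loss() below expects raw logits and applies the softmax internally, so inserting a softmax before the loss would compute the wrong loss. If class probabilities were ever needed at inference time (this post does not use them), one could add:

# Illustrative only, not used anywhere below
probs = tf.nn.softmax(logits)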

def loss(logits, labels):
	labels = tf.cast(labels, tf.int64)
	cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
	cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
	tf.add_to_collection('losses', cross_entropy_mean)

	return tf.add_n(tf.get_collection('losses'), name='total_loss')


# Build the total loss; note this rebinds the name `loss` from the function to the resulting tensor
loss = loss(logits, label_holder)

# Adam optimizer
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

# tf.nn.in_top_k checks whether the true label is among the top-k scoring
# classes; with k=1 this means the highest-scoring class must match the label
top_k_op = tf.nn.in_top_k(logits, label_holder, 1)
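top_k_op is a boolean vector of length batch_size, True wherever the true label is the top prediction. For example, with made-up values:

# logits = [[0.1, 0.9], [0.8, 0.2]], labels = [1, 1]
# tf.nn.in_top_k(logits, labels, k=1) would evaluate to [True, False]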

sess = tf.InteractiveSession()

tf.global_variables_initializer().run()

# Start the queue-runner threads that feed the input pipeline;
# without this, sess.run on the input tensors would block forever
tf.train.start_queue_runners()

for step in range(max_steps):
	start_time = time.time()
	image_batch,label_batch = sess.run([image_train,labels_train])
	_,loss_value = sess.run([train_op,loss],feed_dict={image_holder:image_batch,label_holder:label_batch})

	duration = time.time()-start_time
	if step%10 == 0:
		examples_per_sec = batch_size/duration
		sec_per_batch = float(duration)

		format_str = 'step %d, loss=%.2f (%.1f examples/sec; %.3f sec/batch)'
		print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))


Evaluation

import math

num_examples = 10000  # size of the CIFAR-10 test set
num_iter = int(math.ceil(num_examples / batch_size))

true_count = 0

total_sample_count = num_iter * batch_size  # 79 * 128 = 10112, slightly over 10000 because of the ceil

step = 0

while step < num_iter:
	image_batch, label_batch = sess.run([image_test, labels_test])
	predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch, label_holder: label_batch})

	true_count += np.sum(predictions)
	step += 1

precision = true_count/total_sample_count

print('precision @ 1 = %.3f'%precision)

Results:

The accuracy came out to 73%. My computer's specs are poor and training ran very sluggishly, which may have affected the accuracy.
