LeNet-5 Implementation

import tensorflow as tf
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

batch_size = 100
learning_rate = 0.01
learning_rate_decay = 0.99
max_steps = 30000

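# hidden_layer builds the LeNet-5 forward pass: two convolution+pooling stages
# followed by two fully connected layers. When avg_class is given, the fully
# connected layers read the exponential-moving-average shadow copies of their
# weights instead of the raw variables.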
def hidden_layer(input_tensor, regularizer, avg_class, reuse):
	init = tf.truncated_normal_initializer(stddev=0.1)
	init0 = tf.constant_initializer(0.0)
	init1 = tf.constant_initializer(0.1)
	# input_tensor shape: [batch_size, 28, 28, 1]
	# First convolutional layer: 5x5 kernels, 1 input channel, 32 filters
	with tf.variable_scope("C1-conv", reuse=reuse):
		conv1_weights = tf.get_variable("weight", [5,5,1,32], initializer=init)
		conv1_biases  = tf.get_variable("bias", [32], initializer=init0)

		conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1,1,1,1],padding="SAME")
		relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
	print("relu1 : ")
	print(relu1.get_shape().as_list())
	# 第一个池化层
	with tf.name_scope("S2-MAX_pool"):
		pool1 = tf.nn.max_pool(relu1, ksize=[1,2,2,1], strides=[1,2,2,1],padding="SAME")

	# Second convolutional layer: 5x5 kernels, 32 input channels, 64 filters
	with tf.variable_scope("C3-conv", reuse=reuse):
		conv2_weights = tf.get_variable("weight", [5, 5, 32, 64], initializer=init)
		conv2_biases = tf.get_variable("bias", [64], initializer=init0)
		conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding="SAME")
		relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
	# relu2 shape: [batch_size, 14, 14, 64]
	# Second pooling layer: 2x2 max pooling halves the spatial size to 7x7
	with tf.name_scope("S4-MAX_pool"):
		pool2 = tf.nn.max_pool(relu2, ksize=[1,2,2,1], strides=[1,2,2,1], padding="SAME")

		# Flatten [batch_size, 7, 7, 64] into [batch_size, 3136] for the FC layers
		shape = pool2.get_shape().as_list()
		nodes = shape[1] * shape[2] * shape[3]
		reshaped = tf.reshape(pool2, [shape[0], nodes])

	# First fully connected layer
	with tf.variable_scope("layer5-full", reuse=reuse):
		Full_connection1_weight = tf.get_variable("weight", [nodes, 512], initializer=init)
		# Apply L2 regularization to the fully connected weights
		tf.add_to_collection("losses", regularizer(Full_connection1_weight))
		Full_connection1_bias = tf.get_variable("bias", [512], initializer=init1)
		if avg_class is None:
			Full_1 = tf.nn.relu(tf.matmul(reshaped, Full_connection1_weight) + Full_connection1_bias)
		else:
			Full_1 = tf.nn.relu(tf.matmul(reshaped, avg_class.average(Full_connection1_weight)) + avg_class.average(Full_connection1_bias))

	# Second fully connected layer (output logits)
	with tf.variable_scope("layer6-full", reuse=reuse):
		Full_connection2_weight = tf.get_variable("weight", [512, 10], initializer=init)
		tf.add_to_collection("losses", regularizer(Full_connection2_weight))
		Full_connection2_bias = tf.get_variable("bias", [10], initializer=init1)
		if avg_class is None:
			Full_2 = tf.matmul(Full_1, Full_connection2_weight) + Full_connection2_bias
		else:
			Full_2 = tf.matmul(Full_1, avg_class.average(Full_connection2_weight)) + avg_class.average(Full_connection2_bias)

	return Full_2


x  = tf.placeholder(tf.float32, [batch_size, 28,28,1], name = "x-input")
y_ = tf.placeholder(tf.float32, [None, 10], name = "y-input")
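# L2 weight-decay penalty; hidden_layer adds it to the "losses" collection for each FC weight.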
regularizer = tf.contrib.layers.l2_regularizer(0.0001)

y = hidden_layer(x, regularizer, avg_class=None, reuse=False)
training_step = tf.Variable(0, trainable=False)
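# Maintain exponential-moving-average shadow copies of every trainable variable;
# average_y below runs the network with those averaged weights, which typically
# generalize slightly better at test time.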
variable_averages = tf.train.ExponentialMovingAverage(0.99, training_step)
variable_averages_op =  variable_averages.apply(tf.trainable_variables())

average_y = hidden_layer(x, regularizer, variable_averages, reuse=True)

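# y_ is one-hot, so argmax recovers the integer class index that the sparse
# cross-entropy op expects.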
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))

cross_entropy_mean = tf.reduce_mean(cross_entropy)

loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

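# Multiply the learning rate by 0.99 once per epoch (num_examples / batch_size steps).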
learning_rate = tf.train.exponential_decay(learning_rate, training_step, mnist.train.num_examples/batch_size, learning_rate_decay, staircase=True)

train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=training_step)

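# Group the gradient step and the moving-average update so a single
# sess.run(train_op) performs both.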
with tf.control_dependencies([train_step, variable_averages_op]):
	train_op = tf.no_op(name = "train")

correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
	tf.global_variables_initializer().run()
	for i in range(max_steps):
		if i % 1000 == 0:
			x_val,y_val = mnist.validation.next_batch(batch_size)
			reshaped_x2 = np.reshape(x_val, (batch_size, 28,28,1))
			validate_feed = {x:reshaped_x2, y_ : y_val}
			validate_accuracy = sess.run(accuracy, feed_dict=validate_feed)
			print("After %d training step(s), validate_accuracy = %g%%"%(i, validate_accuracy*100))

		x_train, y_train = mnist.train.next_batch(batch_size)
		reshaped_xs = np.reshape(x_train, (batch_size,28,28,1))
		sess.run(train_op, feed_dict= {x:reshaped_xs, y_ : y_train})

	# The x placeholder has a fixed batch dimension, so evaluate on one test batch
	x_test, y_test = mnist.test.next_batch(batch_size)
	reshaped_test = np.reshape(x_test, (batch_size, 28, 28, 1))
	print("test accuracy = %g" % sess.run(accuracy, feed_dict={x: reshaped_test, y_: y_test}))
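
A note on the data pipeline: the tensorflow.examples.tutorials.mnist module used above was removed in TensorFlow 2.x, so read_data_sets no longer exists there. The sketch below is one possible replacement, not part of the original code; next_batch_np is a hypothetical helper that mimics mnist.train.next_batch on top of tf.keras.datasets.

import numpy as np
import tensorflow as tf

# Load MNIST through Keras instead of the removed tutorials module.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()

# Scale to [0, 1] and add the channel dimension the conv layers expect.
train_images = train_images.astype(np.float32).reshape(-1, 28, 28, 1) / 255.0

def next_batch_np(images, labels, batch_size):
	# Hypothetical helper mimicking mnist.train.next_batch: sample a random
	# batch and one-hot encode the labels to match the y_ placeholder.
	idx = np.random.randint(0, len(images), size=batch_size)
	return images[idx], np.eye(10, dtype=np.float32)[labels[idx]]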

 

LeNet-5 Neural Network, C Source Code

This implementation is well written; it can be compiled and run with gcc, and studying it alongside the theory gives a deeper understanding of deep learning.

Introduction

A LeNet-5 neural network designed after Yann LeCun's paper "Gradient-Based Learning Applied to Document Recognition", written in C with no dependency on any third-party library. On the MNIST handwritten character set it reaches a 97% recognition rate after one generation of training and 98% after multiple generations.

DEMO

The file main.c is a recognition demo for the MNIST data set (60,000 training images, 10,000 test images); compile it directly and it runs.

Project environment

This is a Visual Studio 2015 project; open it with Visual Studio 2015 Update 1 or later and compile. It is written in ANSI C, so the source compiles on other platforms without modification. If it fails to compile because OpenMP is missing, delete the #include and the #pragma omp parallel for lines from lenet.c.

API

##### Batch training
lenet: pointer to the LeNet5 weights, the core of the network
inputs: array of the unsigned char 2D arrays (images) to train on, a pointer to batchSize times the memory of one 2D array. In the MNIST demo each 2D array is 28x28 and each value is the grayscale level of the pixel at that position
resMat: result vector matrix
labels: array of labels for the training images, of size batchSize
batchSize: number of input images (2D arrays) in the training batch

void TrainBatch(LeNet5 *lenet, image *inputs, const char(*resMat)[OUTPUT], uint8 *labels, int batchSize);

##### Single training
lenet: pointer to the LeNet5 weights, the core of the network
input: 2D array of the image to train on
resMat: result vector matrix
label: label of the training image

void Train(LeNet5 *lenet, image input, const char(*resMat)[OUTPUT], uint8 label);

##### Prediction
lenet: pointer to the LeNet5 weights, the core of the network
input: data of the input image
labels: pointer to the result vector matrix
count: number of result vectors
return: the predicted result

int Predict(LeNet5 *lenet, image input, const char(*labels)[LAYER6], int count);

##### Initialization
lenet: pointer to the LeNet5 weights, the core of the network
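
As an illustration only (not code from the project), the sketch below strings the documented calls together. It assumes lenet.h declares LeNet5, image, uint8, OUTPUT, and LAYER6, that OUTPUT == LAYER6 == 10 in the MNIST demo, and that the weights are first set up with the project's initialization routine (its signature is truncated above); the zeroed dummy images stand in for real MNIST data.

#include <stdio.h>
#include <stdlib.h>
#include "lenet.h"   /* assumed to declare LeNet5, image, uint8, OUTPUT, LAYER6 */

int main(void)
{
	/* The weight struct is large, so allocate it on the heap. */
	LeNet5 *lenet = malloc(sizeof(LeNet5));
	if (lenet == NULL)
		return 1;
	/* NOTE: initialize the weights here with the project's initialization
	   function before training (its exact signature is truncated above). */

	/* Result vector matrix: one target output vector per class. A diagonal
	   table is assumed here; the demo presumably builds an equivalent one. */
	char resMat[OUTPUT][OUTPUT] = { 0 };
	for (int i = 0; i < OUTPUT; ++i)
		resMat[i][i] = 1;

	/* Dummy batch: zeroed 28x28 images and labels standing in for MNIST data. */
	static image inputs[8];
	uint8 labels[8] = { 0 };

	/* One batch of training, then classify the first image. */
	TrainBatch(lenet, inputs, (const char(*)[OUTPUT])resMat, labels, 8);
	int guess = Predict(lenet, inputs[0], (const char(*)[LAYER6])resMat, OUTPUT);
	printf("predicted class: %d\n", guess);

	free(lenet);
	return 0;
}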