# letNetlayers.py
import tensorflow as tf

# Network hyper-parameters for a LeNet-style MNIST classifier.
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABLES = 10  # NOTE: misspelling of NUM_LABELS kept — callers reference this name
CONV1_DEEP = 32
CONV1_SIZE = 5
CONV2_DEEP = 64
CONV2_SIZE = 5
FC_SIZE = 512


def inference(input_tensor, train, regularizer):
    """Build the LeNet-5-style forward graph and return the logits.

    Args:
        input_tensor: 4-D float tensor of shape
            [batch, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS].
        train: bool; when True, dropout (keep_prob=0.5) is applied after fc1.
        regularizer: optional callable mapping a weight tensor to a scalar
            loss term; applied to the fully-connected weights and added to
            the 'losses' collection.

    Returns:
        Unnormalized logits tensor of shape [batch, NUM_LABLES].
    """
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weights", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "biases", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 1, 1, 1], padding="SAME")
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weights", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "biases", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding="SAME")
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the final feature map for the fully-connected layers.
    # Using -1 for the batch dimension (instead of pool_shape[0]) keeps the
    # reshape valid when the placeholder batch size is None.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            'weights', [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the FC weights are regularized, matching common LeNet practice.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biase = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biase)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            'weights', [FC_SIZE, NUM_LABLES],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biase = tf.get_variable(
            "bias", [NUM_LABLES], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biase

    return logit
# main_train.py
import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import letNet

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.01
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Raw strings: backslash Windows paths must not be interpreted as escapes.
MODEL_SAVE_PATH = r".\log\model"
MODEL_NAME = "model.ckpt"


def train(mnist):
    """Train the LeNet model on MNIST, checkpointing every 1000 steps.

    Args:
        mnist: dataset object from input_data.read_data_sets, providing
            mnist.train.next_batch and mnist.train.num_examples.
    """
    x = tf.placeholder(
        tf.float32,
        [BATCH_SIZE, letNet.IMAGE_SIZE, letNet.IMAGE_SIZE,
         letNet.NUM_CHANNELS],
        name='x-input')
    y_ = tf.placeholder(tf.float32, [None, letNet.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Bug fix: the original called the undefined module
    # `study_mnist_inference` and passed this `train` *function* as the
    # dropout flag. Use the imported `letNet` module and pass True, since
    # this builds the training graph.
    y = letNet.inference(x, True, regularizer)

    global_step = tf.Variable(0, trainable=False)

    # Maintain exponential moving averages of all trainable variables.
    variable_average = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())

    # Sparse cross-entropy: labels are one-hot, so take argmax for indices.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = data loss + L2 terms collected by the inference graph.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Group the SGD step with the moving-average update into one op.
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Flat 784-vector batches -> NHWC images expected by the convnet.
            reshaped_xs = np.reshape(
                xs, (BATCH_SIZE, letNet.IMAGE_SIZE, letNet.IMAGE_SIZE,
                     letNet.NUM_CHANNELS))
            _, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: reshaped_xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s),loss on training "
                      "batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)


def main(argv=None):
    """Entry point for tf.app.run: load MNIST and start training."""
    mnist = input_data.read_data_sets(r".\log\mnist_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()