Training MNIST with Softmax and a Convolutional Neural Network

https://www.cnblogs.com/zf-blog/p/6075286.html

## Code

# Pipeline: input --> conv --> activation --> pool --> fc --> logits --> softmax
import tensorflow as tf
import os
import csv
import argparse
from tensorflow.examples.tutorials.mnist import input_data
import sys
learning_rate_init = 0.001
train_epochs = 1
batch_size = 100
display_step = 10

n_input = 784
n_classes = 10

FLAGS = None

def weightVariable(shape, name_str, stddev=0.1):
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)


def biasVariable(shape, name_str, stddev=0.00001):
    # Wrap the initial tensor in tf.Variable so the biases are actually trainable
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

def conv2d(x, W, b, stride=1, padding='SAME'):
    with tf.name_scope('conv_wx_b'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, b)
    return y

def Activation(x, activation=tf.nn.relu, name='relu'):
    with tf.name_scope(name):
        y = activation(x)
    return y

def pool2d(x, pool=tf.nn.max_pool, k=2, stride=2):
    # VALID padding: output spatial size is floor((in - k) / stride) + 1
    return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding='VALID')

def full_connected(x, W, b, activate=tf.nn.relu, act_name='relu'):
    with tf.name_scope('Wx_b'):
        y = tf.matmul(x, W)
        y = tf.add(y, b)
    with tf.name_scope(act_name):
        y = activate(y)
    return y

# def EvaluateModeOnData(sess,images,labels):
#     n_samples = images.shape[0]
#     per_batch_size =  batch_size
#     loss = 0
#     acc = 0
#
#     if(n_samples<=per_batch_size):
#         batch_count = 1
#         loss,acc = sess.run([cross_entropy_loss,accuracy],feed_dict={X_origin:images,Y_true:labels,
#                                                                      learning_rate:learning_rate_init})
#     else:
#         batch_count = int(n_samples/per_batch_size)
#         batch_start = 0
#         for idx in range(batch_count):
#             batch_loss,batch_acc = sess.run([cross_entropy_loss,accuracy],feed_dict={X_origin:images[batch_start:batch_start+per_batch_size,:],
#                                                                                      Y_true:labels[batch_start:batch_start+per_batch_size,:],
#                                                                      learning_rate:learning_rate_init})
#             batch_start += per_batch_size
#             loss += batch_loss
#             acc += batch_acc
#     return loss/batch_count,acc/batch_count




def main(_):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    with tf.Graph().as_default():

        with tf.name_scope('inputs'):
            X_origin = tf.placeholder(tf.float32, [None, n_input], name='x_o')
            Y_true = tf.placeholder(tf.float32, [None, n_classes], name='y_t')
            # Reshape each flat 784-dim vector into a 28x28x1 image (NHWC)
            X_image = tf.reshape(X_origin, [-1, 28, 28, 1])

        with tf.name_scope('inference'):
            with tf.name_scope('conv2d'):
                weights = weightVariable(shape=[5,5,1,24],name_str='weights')
                bias = biasVariable(shape=[24],name_str='bias')
                conv_out = conv2d(X_image,weights,bias,stride=1,padding='VALID')
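                # 5x5 conv, VALID padding: 28x28x1 input -> 24x24x24 feature maps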
            with tf.name_scope('activate'):
                activate_out = Activation(conv_out,activation=tf.nn.relu,name='relu')
            with tf.name_scope('pool2d'):
                pool_out = pool2d(activate_out,pool=tf.nn.max_pool,k=2,stride=2)
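                # 2x2 max pool, stride 2, VALID: 24x24x24 -> 12x12x24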
            with tf.name_scope('featurereshape'):
                feature = tf.reshape(pool_out,[-1,12*12*24])
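                # Flatten: 12 * 12 * 24 = 3456 features per example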

            with tf.name_scope('FC_Linear'):
                weights = weightVariable(shape=[12*12*24,n_classes],name_str='weights')
                bias = biasVariable(shape=[n_classes],name_str='biases')
                # tf.identity is the identity mapping, so this FC layer outputs raw logits
                Ypred_logits = full_connected(feature, weights, bias, activate=tf.identity, act_name='identity')
                Y_P_max = tf.argmax(Ypred_logits,1)
                Y_T_max = tf.argmax(Y_true,1)
        with tf.name_scope('loss'):
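            # softmax_cross_entropy_with_logits applies softmax internally,
            # so it must be fed the raw (unnormalized) logits.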
            cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y_true,logits=Ypred_logits))

        with tf.name_scope('train'):
            learning_rate = tf.placeholder(tf.float32)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            trainer = optimizer.minimize(cross_entropy_loss)
        with tf.name_scope('Evaluate'):
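            # Accuracy: fraction of examples whose argmax prediction matches the true class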
            correct_pred = tf.equal(tf.argmax(Ypred_logits,1),tf.argmax(Y_true,1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred,tf.float32))

        init = tf.global_variables_initializer()

        print('Writing the graph definition to the event file')
        summary_writer = tf.summary.FileWriter(logdir='logs_3',graph=tf.get_default_graph())
        summary_writer.close()
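        # The saved graph can be inspected with: tensorboard --logdir logs_3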


        results_list = list()
        training_step = 0
        # Record the hyperparameter values (not the learning_rate placeholder tensor)
        results_list.append(['learning_rate', learning_rate_init,
                             'train_epochs', train_epochs,
                             'batch_size', batch_size,
                             'display_step', display_step])

        with tf.Session() as sess:
            sess.run(init)

            # total_batches = int(mnist.train.num_examples / batch_size)
            # print('per batch size:', batch_size)
            # print('train sample count:', mnist.train.num_examples)
            # print('total batches count:', total_batches)
            #
            # for epoch in range(train_epochs):
            #     for batch_idx in range(total_batches):
            #         batch_x, batch_y = mnist.train.next_batch(batch_size)
            #         sess.run(trainer, feed_dict={X_origin: batch_x, Y_true: batch_y, learning_rate: learning_rate_init})
            #         training_step += 1
            #
            #         if training_step % display_step == 0:
            #             start_idx = max(0, (batch_idx - display_step) * batch_size)
            #             end_idx = batch_idx * batch_size
            #             train_loss, train_acc = EvaluateModeOnData(sess, mnist.train.images[start_idx:end_idx, :],
            #                                                        mnist.train.labels[start_idx:end_idx, :])
            #             print("train step:", str(training_step), " Train loss:", "{:.6f}".format(train_loss), " train_acc",
            #                   "{:.6f}".format(train_acc))
            for step in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(1000)
                _, train_loss = sess.run([trainer, cross_entropy_loss],
                                         feed_dict={X_origin: batch_xs, Y_true: batch_ys,
                                                    learning_rate: learning_rate_init})
                print("train step:", step, "  train_loss:", train_loss)

            ypm = sess.run(Y_P_max, feed_dict={X_origin: mnist.test.images})
            ytm = sess.run(Y_T_max, feed_dict={Y_true: mnist.test.labels})

            for i in range(len(ypm)):
                print("P index: %s   value: %s" % (i + 1, ypm[i]))
                print("T index: %s   value: %s" % (i + 1, ytm[i]))
            # print("max pre Y : ", ypm)
            # print("max true Y: ", ytm)

            accuracy_score = sess.run(accuracy, feed_dict={X_origin: mnist.test.images,
                                                           Y_true: mnist.test.labels})
            print("accuracy_score:", accuracy_score)




if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='D:/tensorflow/pys/test1/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
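
For reference, tf.nn.softmax_cross_entropy_with_logits used in the loss scope above fuses the softmax and the cross-entropy into one numerically stabilized op. The following minimal numpy sketch (an illustration only, not TF's actual implementation) shows what it computes for a single example:

import numpy as np

def softmax_xent(logits, one_hot_label):
    # Shift by the max logit for numerical stability before exponentiating
    z = logits - np.max(logits)
    probs = np.exp(z) / np.sum(np.exp(z))
    # Cross-entropy against the one-hot true label
    return -np.sum(one_hot_label * np.log(probs))

# Example: logits favoring class 0, true label is class 0
# softmax_xent(np.array([2.0, 1.0, 0.1]), np.array([1.0, 0.0, 0.0]))  # ~0.417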