培训课件-第二天上午

#coding=utf-8
import tensorflow as tf
import numpy as np
import matplotlib .pyplot as plt
from tensorflow .examples .tutorials .mnist import input_data


# Download / load the MNIST dataset; one_hot=True encodes each label as a
# 10-element one-hot vector (e.g. 3 -> [0,0,0,1,0,0,0,0,0,0]).
mnist = input_data.read_data_sets("./data_mnist/", one_hot=True)

# Hyperparameters.
batch_size = 100
# Number of mini-batches per training epoch.
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 — the builtin
# int (or floor division) does the same truncation here.
# NOTE(review): the name `iter` shadows the builtin; kept unchanged because
# later code in this file references it by this name.
iter = int(mnist.train.images.shape[0] / batch_size)
print(iter)

# True -> train and checkpoint a model; False -> restore a checkpoint and evaluate.
train_state = True

# Exponentially decayed learning rate:
#   lr = BASE * DECAY ^ (global_step / DECAY_STEPS)
# (with staircase=True the exponent uses integer division, so the rate drops
# in discrete steps every DECAY_STEPS training iterations).
# FIX: constant names were typo'd (LEARNING_REATE_BASE / LEARNING_RAETE_DECAY)
# and inconsistently cased; all references are local to this statement group.
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_STEP = 250
LEARNING_RATE_DECAY = 0.99

# Non-trainable counter; the optimizer increments it on every minimize() call,
# which is what advances the decay schedule.
global_step = tf.Variable(initial_value=0, trainable=False)
learning_rate = tf.train.exponential_decay(
    learning_rate=LEARNING_RATE_BASE,
    global_step=global_step,
    decay_steps=LEARNING_RATE_STEP,
    decay_rate=LEARNING_RATE_DECAY,
    staircase=True,
)



# Dropout keep-probability applied to the second hidden layer.
# NOTE(review): this is a plain Python constant, not a placeholder, so dropout
# stays active during evaluation as well — confirm whether a feed-able
# keep_prob placeholder was intended.
keep_dropout = 0.85

# Quick visual sanity-check of one training sample (disabled):
# image, label = mnist.train.next_batch(1)
# image = image.reshape(28, 28)
# print(image.shape)
# print(label)
# plt.figure()
# plt.imshow(image)
# plt.show()


#定义一个BP网络

#定义一个



# Network inputs: batches of flattened 28x28 grayscale images (784 floats)
# and their one-hot digit labels (10 classes). None = variable batch size.
x_input = tf.placeholder(dtype=tf.float32, shape=[None, 784])
y_input = tf.placeholder(dtype=tf.float32, shape=[None, 10])



# ---- Network definition: a 784 -> 1000 -> 100 -> 10 fully-connected MLP ----

# First hidden layer (tanh activation).
W1 = tf.Variable(tf.truncated_normal(stddev=1, seed=1, shape=[784, 1000]))
b1 = tf.Variable(tf.constant(value=0.1, shape=[1000]))
L1 = tf.nn.tanh(tf.matmul(x_input, W1) + b1)

# Second hidden layer (tanh activation).
W2 = tf.Variable(tf.truncated_normal(stddev=1, seed=1, shape=[1000, 100]))
b2 = tf.Variable(tf.constant(value=0.1, shape=[100]))
L2 = tf.nn.tanh(tf.matmul(L1, W2) + b2)

# Dropout on the second hidden layer (keep_prob comes from the module-level
# constant, so it is fixed at graph-build time).
h_dropout = tf.nn.dropout(L2, keep_prob=keep_dropout)

# Output layer: softmax distribution over the 10 digit classes.
W3 = tf.Variable(tf.truncated_normal(stddev=1, seed=1, shape=[100, 10]))
b3 = tf.Variable(tf.constant(value=0.1, shape=[10]))
prediction = tf.nn.softmax(tf.matmul(h_dropout, W3) + b3)



# Accuracy: fraction of samples whose arg-max prediction matches the arg-max
# of the one-hot label.
predicted_label = tf.argmax(prediction, axis=1)
true_label = tf.argmax(y_input, axis=1)
correct_predict = tf.equal(predicted_label, true_label)
acc = tf.reduce_mean(tf.cast(correct_predict, dtype=tf.float32))




# Backward pass: loss + optimizer.
# Loss is the cross-entropy of the softmax output against the one-hot labels.
# FIX: the prediction is clipped away from 0 before tf.log so the loss cannot
# become -inf/NaN once the softmax saturates — the bare -mean(y*log(p)) form
# is numerically unstable.
cross_entropy = -tf.reduce_mean(
    y_input * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
# Plain SGD; passing global_step makes minimize() increment it each step,
# which drives the exponential_decay learning-rate schedule above.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cross_entropy, global_step=global_step)



# Op that initializes every variable defined above; run once per session.
init = tf.global_variables_initializer()

# Saver used to checkpoint the trained weights and to restore them later.
saver = tf.train.Saver()


#绘图

# FIX: the original paste lost all indentation inside this `with` block,
# which is a syntax error in Python; the structure below is reconstructed
# from the loop/branch logic.
with tf.Session() as sess:
    sess.run(init)
    if train_state:
        # Training mode: 150 epochs of `iter` mini-batches each; after every
        # epoch, report accuracy on a fresh 100-sample test batch and a fresh
        # 100-sample train batch.
        for i in range(150):
            X, Y = mnist.test.next_batch(100)
            xt, yt = mnist.train.next_batch(100)
            for j in range(iter):
                xs, ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict={x_input: xs, y_input: ys})
            acc1, learning_rate1 = sess.run(
                [acc, learning_rate], feed_dict={x_input: X, y_input: Y})
            acc2_train = sess.run(acc, feed_dict={x_input: xt, y_input: yt})
            print("test_acc= train_acc=", acc1, acc2_train)
        # Checkpoint the trained model.
        # NOTE(review): with the indentation lost, it is ambiguous whether the
        # original saved once per epoch or once at the end; saving once after
        # all epochs is assumed here — confirm against the intended behavior.
        saver.save(sess, save_path="./model/save_net.ckpt")
    else:
        # Evaluation mode: restore the checkpoint and report accuracy on one
        # test batch and one train batch.
        saver = tf.train.import_meta_graph(
            meta_graph_or_file="./model/save_net.ckpt.meta")
        saver.restore(sess, save_path="./model/save_net.ckpt")
        X, Y = mnist.test.next_batch(100)
        xt, yt = mnist.train.next_batch(100)
        acc2_test = sess.run(acc, feed_dict={x_input: X, y_input: Y})
        acc2_train = sess.run(acc, feed_dict={x_input: xt, y_input: yt})
        print("test_acc= train_acc=", acc2_test, acc2_train)

转载于:https://www.cnblogs.com/shuimuqingyang/p/11115886.html

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值