Comparing TensorFlow Optimizer Performance

An introduction to TensorFlow's optimizers: this post trains the same softmax-regression model on MNIST with five built-in optimizers (GradientDescent, Adadelta, Adagrad, Adam, and Momentum) and compares their training loss curves and final test accuracy.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST; one_hot=True returns labels as one-hot vectors
mnist = input_data.read_data_sets("MNIST.data", one_hot=True)

print('Training images:', mnist.train.images)
print('Training image array shape:', mnist.train.images.shape)

import pylab
im = mnist.train.images[1]      # second training image, stored as a flat 784-vector
im = im.reshape(-1, 28)         # reshape back to 28x28 for display
pylab.imshow(im)
pylab.show()

# Clear any graph left over from a previous run
tf.reset_default_graph()

X = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images
Y = tf.placeholder(tf.float32, [None, 10])    # one-hot labels
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))
H = tf.matmul(X, W) + b                        # logits
pred = tf.nn.softmax(H)                        # class probabilities
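For reference, this is plain softmax regression: pred_j = exp(H_j) / Σ_k exp(H_k), and the cost defined next is the batch average of the cross-entropy -Σ_j Y_j · log(pred_j) between the one-hot labels and the predictions.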


# Average cross-entropy over the batch
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(pred), axis=1))
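Note that taking tf.log of an explicit softmax can hit log(0) and produce -inf when a predicted probability underflows. A more numerically stable alternative, as a sketch using the fused op available in TF 1.x, computes the loss directly from the logits H:

# Fused softmax + cross-entropy on the logits; avoids log(0)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=H))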
learning_rate = 0.001
optimizerList = []   # one minimize op per optimizer; all share the same W and b

optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
optimizerList.append(optimizer)

optimizer=tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost)
optimizerList.append(optimizer)

optimizer=tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)
optimizerList.append(optimizer)

optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
optimizerList.append(optimizer)

optimizer=tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.5).minimize(cost)
optimizerList.append(optimizer)
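For intuition about the momentum parameter: MomentumOptimizer keeps a running accumulation of past gradients, and momentum=0.5 means half of the previous accumulation carries over into each step. Below is a minimal NumPy sketch of this style of update; momentum_step is a hypothetical helper, grad is assumed to be the gradient array supplied by the caller, and it mirrors TensorFlow's documented accumulation rule rather than the library's exact internals.

import numpy as np

def momentum_step(w, accum, grad, lr=0.001, momentum=0.5):
    # accumulation <- momentum * accumulation + gradient
    accum = momentum * accum + grad
    # variable <- variable - learning_rate * accumulation
    return w - lr * accum, accum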

epochs = []          # epoch numbers, for the x-axis of the loss plot

Optimizer_loss = []  # per-epoch average cost, one list per optimizer

Accuracy = []        # final test accuracy, one entry per optimizer

training_epochs = 25
batch_size = 100
display_step = 1
with tf.Session() as sess:
    for optimizer in optimizerList:
        # Re-initialize W, b, and optimizer state before each run
        # (note: each run also draws a fresh random W)
        sess.run(tf.global_variables_initializer())
        loss = []
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(mnist.train.num_examples / batch_size)
            for i in range(total_batch):   # visit every mini-batch; the original range(batch_size) skipped most of the data
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})
                avg_cost += c / total_batch
            if (epoch + 1) % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
                epochs.append(epoch + 1)
                loss.append(avg_cost)
        Optimizer_loss.append(loss)
        
        print("Finished")
        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        Accuracy.append(accuracy.eval({X:mnist.test.images,Y:mnist.test.labels}))
        print("Accuracy:",Accuracy[-1])
        

import matplotlib.pyplot as plt

optimizerName = ['GradientDescentOptimizer', 'AdadeltaOptimizer', 'AdagradOptimizer', 'AdamOptimizer', 'MomentumOptimizer']
for i in range(len(Optimizer_loss)):
    plt.plot(epochs[:len(Optimizer_loss[i])], Optimizer_loss[i], label=optimizerName[i])
    print(optimizerName[i], "test accuracy:", Accuracy[i])
plt.legend()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

Comparison results:

(Figure: per-epoch training loss for each optimizer; original image not reproduced here.)

In this run, AdamOptimizer gives the best result.
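Adam's advantage here is consistent with how it works: it maintains per-parameter running estimates of the gradient's first and second moments, so each weight gets its own adaptive step size. Below is a self-contained NumPy sketch of the textbook Adam update; adam_step is a hypothetical helper with the usual defaults for beta1, beta2, and eps, and tf.train.AdamOptimizer uses an equivalent but not identical formulation internally.

import numpy as np

def adam_step(w, m, v, grad, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    # Running estimates of the gradient mean and uncentered variance
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    # Bias correction for the zero-initialized moments (t is 1-based)
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    # Per-parameter adaptive step
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v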
