# A comparison of several loss functions
# Cross-entropy experiment
import tensorflow as tf
import numpy as np
# labels = [[0, 0, 1], [0, 1, 0]]        # one-hot labels
# logits = [[2, 0.5, 6], [0.1, 0, 3]]    # raw, unscaled network outputs
#
# logits_scaled = tf.nn.softmax(logits)          # softmax applied once
# logits_scaled2 = tf.nn.softmax(logits_scaled)  # softmax applied twice
#
# # correct usage: pass the raw logits; softmax is applied internally
# result1 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
# # incorrect usage: the inputs have already gone through softmax once
# result2 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled)
# # result3 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled2)
# # cross-entropy written out by hand from the softmax outputs; equals the mean of result1
# result3 = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled), 1))
#
# with tf.Session() as sess:
#     # print(sess.run(logits_scaled))
#     # print(sess.run(logits_scaled2))
#
#     print(sess.run(result1))
#     print(sess.run(tf.reduce_mean(result1)))
#     print(sess.run(result2))
#     print(sess.run(result3))
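# A minimal numpy check (my addition, kept commented out like the experiment above): the same
# cross-entropy can be computed directly as -sum(labels * log(softmax(logits))) per sample,
# which should match result1, and its mean should match result3, because
# softmax_cross_entropy_with_logits applies softmax to the raw logits internally.
# np_logits = np.array([[2, 0.5, 6], [0.1, 0, 3]], dtype=np.float32)
# np_labels = np.array([[0, 0, 1], [0, 1, 0]], dtype=np.float32)
# np_softmax = np.exp(np_logits) / np.exp(np_logits).sum(axis=1, keepdims=True)
# print(-(np_labels * np.log(np_softmax)).sum(axis=1))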
# Using sparse cross-entropy: labels are class indices instead of one-hot vectors
# labels = [2, 1]
# result4 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
# with tf.Session() as sess:
#     print(sess.run(result4))
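# A minimal sketch (my addition, kept commented out like the experiment above): sparse labels
# are just class indices, so tf.one_hot turns [2, 1] into the one-hot [[0,0,1],[0,1,0]] used
# earlier, and the sparse result4 should therefore equal result1.
# labels_onehot = tf.one_hot([2, 1], depth=3)
# result5 = tf.nn.softmax_cross_entropy_with_logits(labels=labels_onehot, logits=logits)
# with tf.Session() as sess:
#     print(sess.run(result5))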

# Learning-rate decay experiment: start at 0.1 and multiply by 0.9 every 10 steps
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=10, decay_rate=0.9)
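# A variant sketch (my addition, not used below): with staircase=True the exponent
# global_step / decay_steps is truncated to an integer, so the rate drops in discrete
# jumps every 10 steps instead of decaying a little at every step.
learning_rate_staircase = tf.train.exponential_decay(initial_learning_rate,
                                                     global_step=global_step,
                                                     decay_steps=10, decay_rate=0.9,
                                                     staircase=True)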
opt = tf.train.GradientDescentOptimizer(learning_rate)  # optimizer that would consume the decayed rate
add_global = global_step.assign_add(1)                  # op that advances the step counter by one
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(learning_rate))   # rate at step 0
    for i in range(20):
        # advance the step counter and fetch the current decayed learning rate
        print(sess.run([add_global, learning_rate]))
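
# A minimal check (my addition): with the default staircase=False the decayed rate follows
# initial_learning_rate * decay_rate ** (global_step / decay_steps), so the values printed
# above can be compared against the same formula computed in plain Python.
expected = [initial_learning_rate * 0.9 ** (step / 10.0) for step in range(21)]
print(expected)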