《深度学习之Tensorflow》 (Deep Learning with TensorFlow), Learning Notes 4

A comparison of several cross-entropy loss functions, plus an experiment with exponential learning-rate decay.

# Cross-entropy experiment: compare TensorFlow's built-in softmax
# cross-entropy with a manual computation.
import tensorflow as tf
import numpy as np

labels = [[0, 0, 1], [0, 1, 0]]
logits = [[2, 0.5, 6], [0.1, 0, 3]]

logits_scaled = tf.nn.softmax(logits)           # softmax applied once
logits_scaled2 = tf.nn.softmax(logits_scaled)   # softmax applied twice

# The op applies softmax internally, so it expects raw logits.
result1 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
# Feeding already-softmaxed values makes the op apply softmax a second time,
# so result2 is not the true cross-entropy.
result2 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled)
# result3 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled2)
# Manual cross-entropy on the softmaxed values; equals the mean of result1.
result3 = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled), 1))

with tf.Session() as sess:
    print(sess.run(logits_scaled))
    print(sess.run(logits_scaled2))

    print(sess.run(result1))
    print(sess.run(tf.reduce_mean(result1)))
    print(sess.run(result2))
    print(sess.run(result3))
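
To spell out what softmax_cross_entropy_with_logits computes internally, here is a small NumPy sketch (added for illustration, not part of the original notes; it reuses the numpy import from the top of the script) that applies softmax and the cross-entropy formula -sum(y * log(p)) to the same labels and logits:

# Manual softmax + cross-entropy with NumPy, mirroring the TF op.
labels_np = np.array([[0, 0, 1], [0, 1, 0]], dtype=np.float64)
logits_np = np.array([[2, 0.5, 6], [0.1, 0, 3]], dtype=np.float64)

exp_logits = np.exp(logits_np)                              # softmax numerator
probs = exp_logits / exp_logits.sum(axis=1, keepdims=True)  # row-wise softmax
cross_entropy = -np.sum(labels_np * np.log(probs), axis=1)  # per-sample loss

print(cross_entropy)         # should match result1
print(cross_entropy.mean())  # should match tf.reduce_mean(result1) and result3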

# Using sparse cross-entropy: labels are class indices rather than one-hot vectors.
labels = [2, 1]  # index form of the one-hot labels [[0,0,1],[0,1,0]] above
result4 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print(sess.run(result4))
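
As a quick sanity check (a sketch added here, not from the original notes; onehot_labels and result5 are names introduced for illustration), the index labels can be converted back to one-hot form with tf.one_hot, and the dense op then reproduces result4:

# Convert class indices to one-hot rows; depth is the number of classes.
onehot_labels = tf.one_hot([2, 1], depth=3)
# The dense op on the one-hot labels computes the same per-sample losses as result4.
result5 = tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=logits)
with tf.Session() as sess:
    print(sess.run(result5))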

# Exponential decay of the learning rate.
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.1
# Decay the rate by a factor of 0.9 every 10 steps.
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=10, decay_rate=0.9)
# The decayed rate would be passed to an optimizer like this
# (no minimize() call here; this demo only prints the schedule).
opt = tf.train.GradientDescentOptimizer(learning_rate)
add_global = global_step.assign_add(1)  # incrementing global_step drives the decay

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(learning_rate))  # initial rate at step 0
    for i in range(20):
        # each iteration increments global_step and fetches the decayed rate
        print(sess.run([add_global, learning_rate]))
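
tf.train.exponential_decay computes initial_learning_rate * decay_rate ** (global_step / decay_steps); with staircase=True the exponent is truncated to an integer, giving a stepwise schedule instead of a smooth one. A small pure-Python sketch (added for illustration) traces the same decay curve as the loop above; note that fetching add_global and learning_rate in one sess.run does not guarantee which is evaluated first, so the TF printout may appear shifted by one step:

# The same decay schedule computed by hand, staircase=False (the default).
for step in range(21):
    decayed = 0.1 * 0.9 ** (step / 10.0)
    print(step, decayed)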