Comparing sigmoid and ReLU activations by their training loss
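The script below trains two one-neuron linear models on the same data, identical except for the activation function: sigmoid in one branch, ReLU in the other. Both outputs are driven toward the constant target 0.75 with a mean-squared loss, and the per-batch loss and mean activation are recorded at every step so the two activations can be plotted against each other. Note that the code targets TensorFlow 1.x (tf.Session, tf.placeholder); on TensorFlow 2 it would need the tf.compat.v1 shim with tf.disable_v2_behavior().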

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

sess = tf.Session()
tf.set_random_seed(5)
np.random.seed(42)

batch_size = 50

# One weight/bias pair per activation, so the two models train independently.
a1 = tf.Variable(tf.random_normal(shape=[1, 1]))
b1 = tf.Variable(tf.random_uniform(shape=[1, 1]))
a2 = tf.Variable(tf.random_normal(shape=[1, 1]))
b2 = tf.Variable(tf.random_uniform(shape=[1, 1]))

# 500 input samples drawn from N(2, 0.1).
x = np.random.normal(2, 0.1, 500)
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# The same linear model passed through sigmoid and ReLU respectively.
sigmoid_activation = tf.sigmoid(tf.add(tf.matmul(x_data, a1), b1))
relu_activation = tf.nn.relu(tf.add(tf.matmul(x_data, a2), b2))

# Mean-squared loss against the constant target 0.75.
loss1 = tf.reduce_mean(tf.square(tf.subtract(sigmoid_activation, 0.75)))
loss2 = tf.reduce_mean(tf.square(tf.subtract(relu_activation, 0.75)))

my_opt = tf.train.GradientDescentOptimizer(0.01)
train_sigmoid_step = my_opt.minimize(loss1)
train_relu_step = my_opt.minimize(loss2)

init = tf.global_variables_initializer()
sess.run(init)

loss_vec_sigmoid = []
loss_vec_relu = []
activation_sigmoid = []
activation_relu = []

for i in range(750):
    # Sample a mini-batch and take one gradient step with each model.
    rand_index = np.random.choice(len(x), size=batch_size)
    x_vals = np.transpose([x[rand_index]])
    sess.run(train_sigmoid_step, feed_dict={x_data: x_vals})
    sess.run(train_relu_step, feed_dict={x_data: x_vals})

    # Record the batch loss and the mean activation for both models.
    loss_vec_sigmoid.append(sess.run(loss1, feed_dict={x_data: x_vals}))
    loss_vec_relu.append(sess.run(loss2, feed_dict={x_data: x_vals}))
    activation_sigmoid.append(np.mean(sess.run(sigmoid_activation, feed_dict={x_data: x_vals})))
    activation_relu.append(np.mean(sess.run(relu_activation, feed_dict={x_data: x_vals})))

# Loss curves: ReLU solid black, sigmoid dashed red.
plt.plot(loss_vec_relu, 'k-')
plt.plot(loss_vec_sigmoid, 'r--')
plt.ylim([0, 1.0])
plt.show()

# Mean activations: sigmoid solid black, ReLU dashed red.
plt.plot(activation_sigmoid, 'k-')
plt.plot(activation_relu, 'r--')
plt.show()
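In typical runs the ReLU loss (solid line in the first plot) falls toward zero much faster, while the sigmoid loss decreases only slowly; correspondingly, the mean ReLU activation jumps past the 0.75 target within a few hundred steps, whereas the sigmoid activation creeps toward it. The gap comes from the gradients: the sigmoid derivative σ'(z) = σ(z)(1 − σ(z)) never exceeds 0.25 and vanishes as |z| grows, while the ReLU derivative is exactly 1 whenever the unit is active. A minimal NumPy sketch of this difference (the helper names here are my own, not part of the script above):

import numpy as np

def sigmoid_grad(z):
    # Derivative of the sigmoid: s * (1 - s), at most 0.25 and vanishing for large |z|.
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)

def relu_grad(z):
    # Derivative of ReLU: a constant 1 wherever the unit is active.
    return 1.0 if z > 0 else 0.0

for z in [0.5, 2.0, 5.0]:
    print(f"z = {z}: sigmoid' = {sigmoid_grad(z):.4f}, relu' = {relu_grad(z):.1f}")

With the learning rate of 0.01 used above, the sigmoid model's effective step size is therefore at most a quarter of the ReLU model's, and it shrinks further as the pre-activation moves away from zero, which matches the slow convergence the first plot shows.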

Reposted from: https://my.oschina.net/u/2511906/blog/2239948
