TensorFlow loss functions

# encoding: utf-8
# Example 1: training with a custom, asymmetric loss function
import tensorflow as tf
import numpy as np

SEED = 23455
COST = 1     # cost of producing one unit
PROFIT = 99  # profit from selling one unit

rdm = np.random.RandomState(SEED)
x = rdm.rand(32, 2)
# Label: x1 + x2 plus noise. rand()/10 lies in [0, 0.1); subtracting 0.05 shifts it to [-0.05, 0.05)
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]
x = tf.cast(x, dtype=tf.float32)
y_ = tf.constant(y_, dtype=tf.float32)  # make the dtype explicit for the ops below

w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))

epochs = 10000
lr = 0.002

for epoch in range(epochs):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        # Over-predicting wastes COST per surplus unit; under-predicting forgoes PROFIT per missed unit.
        loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * COST, (y_ - y) * PROFIT))

    grads = tape.gradient(loss, w1)
    w1.assign_sub(lr * grads)

    if epoch % 500 == 0:
        print("After %d training steps,w1 is " % (epoch))
        print(w1.numpy(), "\n")
print("Final w1 is: ", w1.numpy())

# Custom loss function
# Yogurt costs 1 yuan per unit; the profit per unit is 99 yuan.
# Since the cost is low and the profit high, under-predicting is far more costly than
# over-predicting, so training pushes the model coefficients above 1, i.e. toward over-prediction.
After 0 training steps,w1 is 
[[2.8786578]
 [3.2517848]] 

After 500 training steps,w1 is 
[[1.1460369]
 [1.0672572]] 

After 1000 training steps,w1 is 
[[1.1364173]
 [1.0985414]] 

After 1500 training steps,w1 is 
[[1.1267972]
 [1.1298251]] 

After 2000 training steps,w1 is 
[[1.1758107]
 [1.1724023]] 

After 2500 training steps,w1 is 
[[1.1453722]
 [1.0272155]] 

After 3000 training steps,w1 is 
[[1.1357522]
 [1.0584993]] 

After 3500 training steps,w1 is 
[[1.1261321]
 [1.0897831]] 

After 4000 training steps,w1 is 
[[1.1751455]
 [1.1323601]] 

After 4500 training steps,w1 is 
[[1.1655253]
 [1.1636437]] 

After 5000 training steps,w1 is 
[[1.1350871]
 [1.0184573]] 

After 5500 training steps,w1 is 
[[1.1254673]
 [1.0497413]] 

After 6000 training steps,w1 is 
[[1.1158477]
 [1.0810255]] 

After 6500 training steps,w1 is 
[[1.1062276]
 [1.1123092]] 

After 7000 training steps,w1 is 
[[1.1552413]
 [1.1548865]] 

After 7500 training steps,w1 is 
[[1.1248026]
 [1.0096996]] 

After 8000 training steps,w1 is 
[[1.1151826]
 [1.0409834]] 

After 8500 training steps,w1 is 
[[1.1055626]
 [1.0722672]] 

After 9000 training steps,w1 is 
[[1.1545763]
 [1.1148446]] 

After 9500 training steps,w1 is 
[[1.144956]
 [1.146128]] 

Final w1 is:  [[1.1255957]
 [1.0237043]]
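
Both final coefficients indeed sit above 1, as the comment predicts. For contrast, here is a minimal sketch (an addition, reusing x, y_, lr, and epochs from above) that swaps the asymmetric loss for a symmetric MSE; since the noise is zero-mean and MSE penalizes over- and under-prediction equally, w1 would be expected to settle near [[1], [1]] instead:

# Sketch: the same training loop with symmetric MSE instead of the asymmetric loss.
w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))  # re-initialize
for epoch in range(epochs):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        loss_mse = tf.reduce_mean(tf.square(y_ - y))  # penalizes both directions equally
    grads = tape.gradient(loss_mse, w1)
    w1.assign_sub(lr * grads)
print("MSE-trained w1:", w1.numpy())  # expected to land near [[1.], [1.]]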
# Example 2
# Cross-entropy loss: a smaller value means the predicted distribution is
# closer to the true one.
loss_ce1 = tf.losses.categorical_crossentropy([1, 0], [0.6, 0.4])
loss_ce2 = tf.losses.categorical_crossentropy([1, 0], [0.8, 0.2])
print("loss_ce1:", loss_ce1)
print("loss_ce2:", loss_ce2)
loss_ce1: tf.Tensor(0.5108256, shape=(), dtype=float32)
loss_ce2: tf.Tensor(0.22314353, shape=(), dtype=float32)
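
For a one-hot label, categorical cross-entropy reduces to -ln(p), where p is the probability the prediction assigns to the true class, so the two values above can be checked by hand (a quick NumPy sketch, not part of the original code):

import numpy as np

print(-np.log(0.6))  # 0.5108256... == loss_ce1
print(-np.log(0.8))  # 0.2231435... == loss_ce2

Since 0.8 is closer to the true probability of 1, loss_ce2 is the smaller of the two.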
# Example 3
# Combining softmax with the cross-entropy loss: softmax first converts the
# raw logits into a valid probability distribution, then cross-entropy is applied.
y_ = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
y = np.array([[12, 3, 2], [3, 10, 1], [1, 2, 5], [4, 6.5, 1.2], [3, 6, 1]])
y_pro = tf.nn.softmax(y)
loss_ce1 = tf.losses.categorical_crossentropy(y_, y_pro)
loss_ce2 = tf.nn.softmax_cross_entropy_with_logits(y_, y)

print('Step-by-step result:\n', loss_ce1)
print('Combined result:\n', loss_ce2)
Step-by-step result:
 tf.Tensor(
[1.68795487e-04 1.03475622e-03 6.58839038e-02 2.58349207e+00
 5.49852354e-02], shape=(5,), dtype=float64)
Combined result:
 tf.Tensor(
[1.68795487e-04 1.03475622e-03 6.58839038e-02 2.58349207e+00
 5.49852354e-02], shape=(5,), dtype=float64)
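
The two results agree. The fused op tf.nn.softmax_cross_entropy_with_logits is nonetheless preferred in practice because it works in log space, which avoids overflow when the logits are large. A minimal sketch of such a numerically stable manual computation (an illustration with an assumed helper name, not the library's internals):

import numpy as np

def stable_softmax_cross_entropy(labels, logits):
    # Softmax is invariant to subtracting a per-row constant; subtracting the
    # row max keeps exp() from overflowing for large logits.
    z = logits - logits.max(axis=1, keepdims=True)
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -(labels * log_softmax).sum(axis=1)

y_ = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
y = np.array([[12, 3, 2], [3, 10, 1], [1, 2, 5], [4, 6.5, 1.2], [3, 6, 1]])
print(stable_softmax_cross_entropy(y_, y))  # matches loss_ce1 and loss_ce2 above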