# encoding: utf-8
# Case 1: training with a custom asymmetric loss function.
# Scenario: yogurt costs 1 yuan to produce (COST) and earns 99 yuan of
# profit per unit sold (PROFIT). Under-predicting demand is therefore far
# more expensive than over-predicting, so the model is pushed to predict
# high: the learned coefficients should come out slightly above 1.
import tensorflow as tf
import numpy as np

SEED = 23455
COST = 1     # penalty per unit of over-prediction (wasted production cost)
PROFIT = 99  # penalty per unit of under-prediction (missed profit)

rdm = np.random.RandomState(SEED)
x = rdm.rand(32, 2)  # 32 samples, 2 features, uniform in [0, 1)
# Ground truth y = x1 + x2 plus noise: rand()/10 is [0, 0.1), and
# subtracting 0.05 centres it, giving uniform noise in [-0.05, 0.05).
y_ = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in x]
x = tf.cast(x, dtype=tf.float32)

w1 = tf.Variable(tf.random.normal([2, 1], stddev=1, seed=1))

EPOCHS = 10000  # was `epoch`; renamed so the loop index no longer shadows it
lr = 0.002

for epoch in range(EPOCHS):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1)
        # Asymmetric loss: charge COST per unit when we over-predict
        # (y > y_) and PROFIT per unit when we under-predict, which
        # biases the fit toward over-prediction.
        loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * COST, (y_ - y) * PROFIT))
    grads = tape.gradient(loss, w1)
    w1.assign_sub(lr * grads)
    if epoch % 500 == 0:
        print("After %d training steps,w1 is " % (epoch))
        print(w1.numpy(), "\n")

print("Final w1 is: ", w1.numpy())
After 0 training steps,w1 is
[[2.8786578]
[3.2517848]]
After 500 training steps,w1 is
[[1.1460369]
[1.0672572]]
After 1000 training steps,w1 is
[[1.1364173]
[1.0985414]]
After 1500 training steps,w1 is
[[1.1267972]
[1.1298251]]
After 2000 training steps,w1 is
[[1.1758107]
[1.1724023]]
After 2500 training steps,w1 is
[[1.1453722]
[1.0272155]]
After 3000 training steps,w1 is
[[1.1357522]
[1.0584993]]
After 3500 training steps,w1 is
[[1.1261321]
[1.0897831]]
After 4000 training steps,w1 is
[[1.1751455]
[1.1323601]]
After 4500 training steps,w1 is
[[1.1655253]
[1.1636437]]
After 5000 training steps,w1 is
[[1.1350871]
[1.0184573]]
After 5500 training steps,w1 is
[[1.1254673]
[1.0497413]]
After 6000 training steps,w1 is
[[1.1158477]
[1.0810255]]
After 6500 training steps,w1 is
[[1.1062276]
[1.1123092]]
After 7000 training steps,w1 is
[[1.1552413]
[1.1548865]]
After 7500 training steps,w1 is
[[1.1248026]
[1.0096996]]
After 8000 training steps,w1 is
[[1.1151826]
[1.0409834]]
After 8500 training steps,w1 is
[[1.1055626]
[1.0722672]]
After 9000 training steps,w1 is
[[1.1545763]
[1.1148446]]
After 9500 training steps,w1 is
[[1.144956]
[1.146128]]
Final w1 is: [[1.1255957]
[1.0237043]]