设置四个输入x1,x2,x3,x4,y为输出,通过TensorFlow拟合得到各自系数。
自定义数据集,x1,x2,x3,x4为随机生成(0,1)之间的随机数,y为x1+x2+x3+x4+随机噪声。
x为(49,4)即49行4列的数据,y为49行1列;初始参数w1为【4,1】4行1列,b1为1行1列;损失函数为均方误差,每500轮打印一次。
代码如下:
import tensorflow as tf
import numpy as np

SEED = 23455

# Fit a linear model y = x @ w1 + b1 to synthetic data with plain
# gradient descent. Features are 49 samples of 4 uniform [0,1) values;
# the target is the sum of the four features plus small uniform noise.
rdm = np.random.RandomState(seed=SEED)  # reproducible RNG for the dataset
x = rdm.rand(49, 4)  # features: 49 rows, 4 columns, values in [0,1)
# Noise term: rand()/10 is in [0, 0.1); subtracting 0.05 centers it at
# [-0.05, 0.05). Target shape is (49, 1).
y_ = [[x1 + x2 + x3 + x4 + (rdm.rand() / 10.0 - 0.05)]
      for (x1, x2, x3, x4) in x]

# Cast BOTH features and targets to float32 so every op shares one dtype.
# (The original left y_ as a Python float64 list and relied on implicit
# conversion inside the loss computation.)
x = tf.cast(x, dtype=tf.float32)
y_ = tf.cast(y_, dtype=tf.float32)

# Trainable parameters: weights (4x1) and bias (1,), randomly initialized.
w1 = tf.Variable(tf.random.normal([4, 1], stddev=1, seed=1))
b1 = tf.Variable(tf.random.normal([1], stddev=1, seed=1))

EPOCHS = 15000  # total passes over the dataset (was shadowed by the loop var)
lr = 0.002     # learning rate for gradient descent

for epoch in range(EPOCHS):
    with tf.GradientTape() as tape:
        y = tf.matmul(x, w1) + b1  # forward pass
        loss_mse = tf.reduce_mean(tf.square(y_ - y))  # mean-squared-error loss
    # Gradients of the loss w.r.t. both trainable parameters.
    grads = tape.gradient(loss_mse, [w1, b1])
    w1.assign_sub(lr * grads[0])  # in-place gradient-descent update of w1
    b1.assign_sub(lr * grads[1])  # in-place gradient-descent update of b1
    if epoch % 500 == 0:
        print("After %d training steps,w1 is " % (epoch))
        print(w1.numpy(), "\n")

# Report the fitted parameters; w1 should approach [1,1,1,1]^T and b1 -> 0.
print("Final w1 is: ", w1.numpy())
print("Final b1 is: ", b1.numpy())