Linear Regression
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # must be set before importing tensorflow to take effect

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

data = 3 * np.arange(1000) + np.random.rand(1000) * 100  # synthetic data: y = 3x plus uniform noise
# plt.plot(data)
# plt.show()

train_w = tf.constant(0.0)  # note: use 0.0 so the dtype is float32
train_b = tf.constant(0.0)
true_y = tf.convert_to_tensor(data)
true_y = tf.cast(true_y, dtype=tf.float32)  # cast so it can be combined with the float32 tensors below
true_x = tf.range(1000.0)  # note the .0, again to get float32
train_dataset = tf.data.Dataset.from_tensor_slices((true_x, true_y))
train_dataset = train_dataset.batch(200)

for i in range(10):
    for step, (x, y_true) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            tape.watch([train_b, train_w])  # unnecessary for tf.Variable (see the variant after this block)
            y = train_w * x + train_b
            loss = tf.reduce_sum((y - y_true) ** 2) / x.shape[0]  # same as tf.reduce_mean((y - y_true) ** 2)
        grads = tape.gradient(loss, [train_b, train_w])  # automatic differentiation
        print(grads[1].numpy())  # inspect the gradient magnitude to choose a learning rate
        # With x in [0, 1000), mean(x**2) is roughly 3.3e5, so the step must be tiny:
        # too large a learning rate makes the gradient flip sign each step and loss diverges to inf.
        train_b -= grads[0] * 1e-7
        train_w -= grads[1] * 1e-7
        if step % 2 == 0:
            print('step=%i, w=%f, b=%f, loss=%f' % (step, train_w.numpy(), train_b.numpy(), loss.numpy()))
print(train_w.numpy(), train_b.numpy())
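
As the comment on tape.watch says, parameters created as tf.Variable are tracked by the tape automatically, so no watch call is needed. A minimal sketch of that variant, reusing train_dataset from above and updating in place with assign_sub:

train_w = tf.Variable(0.0)  # trainable variables are watched by the tape automatically
train_b = tf.Variable(0.0)
for i in range(10):
    for step, (x, y_true) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            y = train_w * x + train_b
            loss = tf.reduce_mean((y - y_true) ** 2)
        grads = tape.gradient(loss, [train_b, train_w])
        train_b.assign_sub(grads[0] * 1e-7)  # in-place update instead of rebinding a new tensor
        train_w.assign_sub(grads[1] * 1e-7)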
Output:
2.980926 0.0046978365

w ends up close to the true slope of 3. b is still near 0 rather than its least-squares value of about 50 (the mean of the uniform noise): at a learning rate of 1e-7 the bias gradient moves it very slowly, so it would need many more epochs to converge.
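
To eyeball the fit, the commented-out matplotlib lines at the top can be extended to overlay the learned line on the raw data. A small sketch, assuming the training loop above has just run so train_w and train_b hold the final values:

plt.plot(data, label='data')  # raw noisy points, plotted against their index x
plt.plot(true_x.numpy() * train_w.numpy() + train_b.numpy(), label='fit: w*x + b')
plt.legend()
plt.show()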