import tensorflow as tf
import numpy as np

# Create synthetic training data drawn from the line y = 0.1 * x + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Build the model: a single learnable weight and bias.
# Training should drive Weight -> 0.1 and biases -> 0.3.
Weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # random start in [-1, 1)
biases = tf.Variable(tf.zeros([1]))
y = Weight * x_data + biases

# Mean squared error between predictions and targets.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate chosen by hand
train = optimizer.minimize(loss)  # one gradient-descent step per run

# All Variables defined above must be initialized before use.
init = tf.global_variables_initializer()

sess = tf.Session()
try:
    sess.run(init)  # essential: run the initializer first
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Print the current estimates every 20 steps.
            print(step, sess.run(Weight), sess.run(biases))
finally:
    sess.close()  # original leaked the session; release its resources
import tensorflow as tf
import numpy as np

# Create synthetic training data drawn from the line y = 0.1 * x + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Build the model: a single learnable weight and bias.
# Training should drive Weight -> 0.1 and biases -> 0.3.
Weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # random start in [-1, 1)
biases = tf.Variable(tf.zeros([1]))
y = Weight * x_data + biases

# Mean squared error between predictions and targets.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate chosen by hand
train = optimizer.minimize(loss)  # one gradient-descent step per run

# All Variables defined above must be initialized before use.
init = tf.global_variables_initializer()

# Context manager closes the session automatically — no manual sess.close().
with tf.Session() as sess:
    sess.run(init)  # essential: run the initializer first
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Print the current estimates every 20 steps.
            print(step, sess.run(Weight), sess.run(biases))
# Sample output (exact values vary with the random initialization):
# 0 [-0.24398923] [0.653689]
# 20 [0.00985567] [0.34723642]
# 40 [0.08043306] [0.31025326]
# 60 [0.09575275] [0.30222562]
# 80 [0.0990781] [0.3004831]
# 100 [0.09979989] [0.3001049]
# 120 [0.09995657] [0.30002278]
# 140 [0.09999057] [0.30000496]
# 160 [0.09999795] [0.30000108]
# 180 [0.09999954] [0.30000025]
# 200 [0.0999999] [0.30000007]