其实这是一篇很无聊的博客= =只是想做个纪念而已嘻嘻嘻
import numpy as np
import tensorflow as tf
# --- Build the TensorFlow (1.x) graph ---
# Synthetic training data: 100 points on the line y = 0.1 * x + 0.3,
# with x drawn uniformly from [0, 1). float32 is TF's default dtype.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Trainable parameters: a single weight initialized uniformly in [-1, 1]
# and a bias initialized to zero. Training should recover 0.1 and 0.3.
Weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weight * x_data + biases  # model prediction

# Mean squared error between prediction and target.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate 0.5
train = optimizer.minimize(loss)  # one gradient-descent step per sess.run
init = tf.global_variables_initializer()
# --- Graph construction ends here ---

# Use a context manager so the session is always closed, even on error
# (the original leaked the session by never calling sess.close()).
with tf.Session() as sess:
    sess.run(init)  # must initialize variables before any training step
    for step in range(201):  # 201 training steps
        sess.run(train)
        if step % 20 == 0:
            # Fetch both tensors in a single run() call: one graph
            # execution instead of two, and both values come from the
            # same consistent state.
            w_val, b_val = sess.run([Weight, biases])
            print(step, w_val, b_val)
输出:
0 [ 0.64853036] [-0.00024236]
20 [ 0.23654322] [ 0.22672826]
40 [ 0.1338377] [ 0.28184205]
60 [ 0.10838556] [ 0.29550016]
80 [ 0.10207809] [ 0.29888487]
100 [ 0.10051499] [ 0.29972365]
120 [ 0.10012762] [ 0.29993153]
140 [ 0.10003164] [ 0.29998302]
160 [ 0.10000783] [ 0.29999581]
180 [ 0.10000193] [ 0.29999897]
200 [ 0.10000048] [ 0.29999974]