import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# 1. Generate sample points scattered around the line y = 0.1*x + 0.3.
#    x is drawn from N(0, 0.55); Gaussian noise N(0, 0.03) is added to y.
num_points = 10000
vector_set = []
for _ in range(num_points):
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vector_set.append([x1, y1])
x_data = [v[0] for v in vector_set]
y_data = [v[1] for v in vector_set]

# 2. Define the computation graph (TensorFlow 1.x graph-mode API).
# 2.1 Model: y = w*x + b, with w initialized uniformly in [-1, 1) and b at 0.
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="w")
b = tf.Variable(tf.zeros([1]), name="b")
y = w * x_data + b
# 2.2 Loss: mean squared error between predictions and targets.
#     (Fixed the node-name typo "lose" -> "loss" and dropped a redundant * 1.0.)
loss = tf.reduce_mean(tf.square(y - y_data), name="loss")
# 2.3 Optimizer: plain gradient descent with learning rate 0.5.
optimizer = tf.train.GradientDescentOptimizer(0.5)
# 2.4 Training op: one step minimizing the loss.
train = optimizer.minimize(loss, name='train')

# 3. Run the training loop.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Initial (random) parameters and loss before any training.
    print("w=", sess.run(w), ", b=", sess.run(b), ", loss=", sess.run(loss))
    for _ in range(100):
        sess.run(train)
        # NOTE(review): original indentation was lost; per the upstream
        # tutorial this prints progress every step — confirm if per-step
        # output is too verbose for your use.
        print("w=", sess.run(w), ", b=", sess.run(b), ", loss=", sess.run(loss))
    # Extra step added by the author: plot the data and the fitted line.
    plt.scatter(x_data, y_data, c="r")
    plt.plot(x_data, sess.run(w) * x_data + sess.run(b))
    plt.show()
# 以上代码出处为以下课程,本人在完成该课程学习时,在本地环境运行了以上代码并在注释中增加了自己的理解。
# http://study.163.com/course/courseMain.htm?courseId=1003606092