import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
# Suppress TensorFlow C++ log output (hide INFO/WARNING messages)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
def model(x, w, b):
    """Linear model: return the prediction w * x + b.

    Args:
        x: input tensor/placeholder (scalar or array-like).
        w: weight (slope) variable.
        b: bias (intercept) variable.

    Returns:
        The elementwise prediction ``w * x + b``.
    """
    return tf.multiply(x, w) + b
if __name__ == "__main__":
    # NOTE(review): this uses the TensorFlow 1.x graph API (placeholder/Session/
    # GradientDescentOptimizer); under TF 2.x it requires `tf.compat.v1` and
    # `disable_eager_execution()` — confirm the intended TF version.
    np.random.seed(5)  # fixed seed so the synthetic data is reproducible

    # Synthetic data: y = 2x + 1 plus Gaussian noise with std 0.4.
    x_data = np.linspace(-1, 1, 100)
    y_data = 2 * x_data + 1.0 + np.random.randn(*x_data.shape) * 0.4

    # Plot the noisy samples and the true underlying line (red).
    plt.scatter(x_data, y_data)  # scatter plot of the samples
    plt.plot(x_data, 1.0 + 2 * x_data, color='r', linewidth=3)

    # Graph definition: feed placeholders, trainable parameters, prediction.
    x = tf.placeholder('float', name='x')
    y = tf.placeholder('float', name='y')
    w = tf.Variable(1.0, name='w0')  # initial slope guess
    b = tf.Variable(0.0, name='b0')  # initial intercept guess
    pred = model(x, w, b)

    train_epochs = 10
    learning_rate = 0.01

    # Mean-squared-error loss, minimized with plain gradient descent.
    loss_function = tf.reduce_mean(tf.square(y - pred))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(train_epochs):
            # Stochastic gradient descent: one sample per update (batch size 1).
            for xs, ys in zip(x_data, y_data):
                _, loss = sess.run([optimizer, loss_function],
                                   feed_dict={x: xs, y: ys})
            # Snapshot the parameters and draw this epoch's fitted line (blue).
            b0tmp = b.eval(sess)
            w0tmp = w.eval(sess)
            plt.plot(x_data, b0tmp + w0tmp * x_data, color='b', linewidth=3)
            print(b0tmp, w0tmp)
    plt.show()