Implementing Linear Regression with TensorFlow
The complete script is below. It generates 1,000 noisy points around the line y = 2x + 3, defines a weight W and a bias b, and fits them by gradient descent on the mean squared error (TensorFlow 1.x API):

```python
# -*- coding: UTF-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow log noise

num_points = 1000  # number of noisy sample points
vectors_set = []
for i in range(num_points):  # generate 1000 random points around y = 2x + 3
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 2 + 3 + np.random.normal(0.0, 0.9)
    vectors_set.append([x1, y1])

x_data = [v[0] for v in vectors_set]  # x data
y_data = [v[1] for v in vectors_set]  # y data

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='W')  # weight W, initialized uniformly in [-1, 1]
b = tf.Variable(tf.zeros([1]), name='b')  # bias b, initialized to 0
y = W * x_data + b

# use mean squared error as the loss
loss = tf.reduce_mean(tf.square(y - y_data), name='loss')
# train with gradient descent; the argument is the learning rate
optimizer = tf.train.GradientDescentOptimizer(0.5)
# training means minimizing the loss
train = optimizer.minimize(loss, name='train')

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

print('W =', sess.run(W), ' b =', sess.run(b), ' loss =', sess.run(loss))
for i in range(500):
    sess.run(train)
print('W =', sess.run(W), ' b =', sess.run(b), ' loss =', sess.run(loss))

plt.figure()
plt.scatter(x_data, y_data, c='r')
plt.plot(x_data, sess.run(W) * x_data + sess.run(b), 'b')
plt.show()
sess.close()
```
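Note that the script uses the TensorFlow 1.x graph API (tf.Session, tf.train.GradientDescentOptimizer, tf.random_uniform), which is no longer available in the default namespace of TensorFlow 2.x. As a minimal sketch, assuming a TensorFlow 2.x install, the same regression can be written eagerly with tf.GradientTape; the variable names mirror the script above:

```python
import tensorflow as tf
import numpy as np

# Same synthetic data: 1000 points around y = 2x + 3
x_data = np.random.normal(0.0, 0.55, 1000).astype(np.float32)
y_data = x_data * 2 + 3 + np.random.normal(0.0, 0.9, 1000).astype(np.float32)

W = tf.Variable(tf.random.uniform([1], -1.0, 1.0))  # weight, uniform in [-1, 1]
b = tf.Variable(tf.zeros([1]))                      # bias, starts at 0
optimizer = tf.keras.optimizers.SGD(learning_rate=0.5)

for step in range(500):
    with tf.GradientTape() as tape:
        y_pred = W * x_data + b
        loss = tf.reduce_mean(tf.square(y_pred - y_data))  # mean squared error
    grads = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))

print('W =', W.numpy(), ' b =', b.numpy(), ' loss =', loss.numpy())
```

Alternatively, the original script can run under TensorFlow 2.x by replacing the tf.* calls with their tf.compat.v1.* equivalents and calling tf.compat.v1.disable_eager_execution() first.

Because the data are synthesized around y = 2x + 3, training should converge near W ≈ 2 and b ≈ 3. As a quick sanity check (a sketch, reusing the x_data and y_data arrays from above), the closed-form least-squares fit can be computed with NumPy:

```python
# Design matrix [x, 1]; lstsq solves min ||A @ [W, b] - y||^2 in closed form
A = np.stack([x_data, np.ones_like(x_data)], axis=1)
(W_ls, b_ls), *_ = np.linalg.lstsq(A, y_data, rcond=None)
print('least-squares W =', W_ls, ' b =', b_ls)  # expect roughly 2 and 3
```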