1.本文介绍线性回归问题的代码实现（示例虽然导入了 TensorFlow 2.0，但核心计算全部由 NumPy 完成），主要是线性回归的代码展示；
关于梯度下降法的原理，可参考相关的梯度下降教程（原文此处的链接已丢失）。
本文使用的数据集为 data.csv（原文此处的下载链接已丢失，可在文末给出的 GitHub 仓库中获取）。
import tensorflow as tf
import numpy as np
# Mean-squared-error of the line y = w*x + b over a set of points.
def compute_error_for_line_given_points(b, w, points):
    """Return the mean squared error of the line ``y = w*x + b``.

    Args:
        b: intercept of the line.
        w: slope of the line.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.

    Returns:
        float: mean of ``(y - (w*x + b))**2`` over all N points.
    """
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    # Vectorized MSE: one NumPy pass instead of a Python-level loop.
    return float(np.mean((y - (w * x + b)) ** 2))
# One step of batch gradient descent on the MSE loss.
def step_gradient(b_current, w_current, points, learning_rate):
    """Perform a single gradient-descent update of (b, w).

    Gradients of the MSE loss ``mean((w*x + b - y)**2)``:
        dL/db = (2/N) * sum(w*x + b - y)
        dL/dw = (2/N) * sum(x * (w*x + b - y))

    Args:
        b_current: current intercept.
        w_current: current slope.
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        learning_rate: step size for the update.

    Returns:
        list: ``[new_b, new_w]`` after one update step.
    """
    pts = np.asarray(points, dtype=float)
    x, y = pts[:, 0], pts[:, 1]
    # Residuals of the current line; reused by both gradients.
    residual = (w_current * x + b_current) - y
    b_gradient = 2.0 * np.mean(residual)
    w_gradient = 2.0 * np.mean(x * residual)
    new_b = b_current - learning_rate * b_gradient
    new_w = w_current - learning_rate * w_gradient
    return [float(new_b), float(new_w)]
# Run gradient descent for a fixed number of iterations.
def gradient_descent_runner(points, start_b, start_w, learning_rate, num_iterations):
    """Iterate ``step_gradient`` ``num_iterations`` times.

    Args:
        points: array-like of shape (N, 2); column 0 is x, column 1 is y.
        start_b: initial intercept guess.
        start_w: initial slope guess.
        learning_rate: step size passed to each gradient step.
        num_iterations: number of update steps to perform.

    Returns:
        list: ``[b, w]`` after the final iteration.
    """
    # Convert once up front — the original rebuilt np.array(points)
    # on every iteration, which is loop-invariant work.
    pts = np.asarray(points)
    b, w = start_b, start_w
    for _ in range(num_iterations):
        b, w = step_gradient(b, w, pts, learning_rate)
    return [b, w]
# Entry point: load the dataset and fit a line with gradient descent.
def run():
    """Load ``data.csv``, fit y = w*x + b by gradient descent, print results."""
    # Each row of data.csv is "x,y".
    points = np.genfromtxt("data.csv", delimiter=",")

    # Hyperparameters and starting guesses for the fit.
    learning_rate = 0.0001
    initial_b = 0  # initial y-intercept guess
    initial_w = 0  # initial slope guess
    num_iterations = 10000

    start_error = compute_error_for_line_given_points(initial_b, initial_w, points)
    print("Starting gradient descent at b = {0}, w = {1}, error = {2}"
          .format(initial_b, initial_w, start_error))
    print("Running...")

    b, w = gradient_descent_runner(points, initial_b, initial_w,
                                   learning_rate, num_iterations)
    final_error = compute_error_for_line_given_points(b, w, points)
    print("After {0} iterations b = {1}, w = {2}, error = {3}".
          format(num_iterations, b, w, final_error))

    # Optional: plot error against iteration count (requires matplotlib).
    #error = compute_error_for_line_given_points(b, w, points)
    #plt.plot( num_iterations, error, ls='-', lw='3')
    #plt.ylabel('compute_error_for_line_given_points')
    #plt.xlabel('num_iterations')


if __name__ == '__main__':
    run()
2.本文资源来自于https://github.com/dragen1860/Deep-Learning-with-TensorFlow-book。