Linear Regression via Iterative Gradient Descent

The code below fits a line y = w * x + b to a set of 2-D points by minimizing the mean squared error with gradient descent.

import numpy as np

def compute_error_points(b, w, points):
    total_error = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # accumulate the squared error of the prediction w * x + b
        total_error += (y - (w * x + b)) ** 2
    # return the loss: the mean squared error over all points
    return total_error / float(len(points))
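
For reference, the loss this function computes is the mean squared error over the N points:

    L(w, b) = \frac{1}{N} \sum_{i=1}^{N} \left( y_i - (w x_i + b) \right)^2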

def step_gradient(b_current, w_current, points, learning_rate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        # partial derivative of the loss with respect to b
        b_gradient += (2 / N) * ((w_current * x + b_current) - y)
        # partial derivative of the loss with respect to w
        w_gradient += (2 / N) * x * ((w_current * x + b_current) - y)

    new_b = b_current - (learning_rate * b_gradient)
    new_w = w_current - (learning_rate * w_gradient)

    # return the updated b and w after one gradient step
    return [new_b, new_w]
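
The two accumulations above implement the partial derivatives of the mean squared error, and the update then moves (b, w) one small step against the gradient:

    \frac{\partial L}{\partial b} = \frac{2}{N} \sum_{i=1}^{N} \left( (w x_i + b) - y_i \right)

    \frac{\partial L}{\partial w} = \frac{2}{N} \sum_{i=1}^{N} x_i \left( (w x_i + b) - y_i \right)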

def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    '''
    :param points: the data, an array of (x, y) pairs
    :param starting_b: initial value of b
    :param starting_w: initial value of w
    :param learning_rate: learning rate (step size of each update)
    :param num_iterations: number of iterations
    :return: the final [b, w]
    '''
    b = starting_b
    w = starting_w
    # update b and w once per iteration
    for i in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), learning_rate)
    return [b, w]
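
As an aside, the per-sample loop in step_gradient can be replaced by vectorized numpy operations, which is shorter and faster on large datasets. A minimal sketch; the name step_gradient_vectorized is my own, not part of the original code:

def step_gradient_vectorized(b_current, w_current, points, learning_rate):
    x, y = points[:, 0], points[:, 1]
    N = float(len(points))
    # residuals of the current model on all samples at once
    residual = (w_current * x + b_current) - y
    b_gradient = (2 / N) * residual.sum()
    w_gradient = (2 / N) * (x * residual).sum()
    return [b_current - learning_rate * b_gradient,
            w_current - learning_rate * w_gradient]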

points = np.random.random(size=(100, 2))
b, w = gradient_descent_runner(points, 0, 0, 0.0001, 1000)
print(b, w)
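
Note that np.random.random draws x and y independently, so this demo data has no underlying linear relationship and the fitted b and w are not very meaningful. A quick sanity check is to train on synthetic data with known parameters; the true values 2.0 and 1.0, the noise scale, and the learning-rate/iteration settings below are arbitrary choices for illustration:

# sanity check: fit data generated from y = 2x + 1 plus small Gaussian noise
x = np.random.random(100)
y = 2.0 * x + 1.0 + np.random.normal(0, 0.01, 100)
data = np.stack([x, y], axis=1)

b, w = gradient_descent_runner(data, 0, 0, 0.1, 10000)
print(b, w)                              # should approach 1.0 and 2.0
print(compute_error_points(b, w, data))  # final mean squared error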


Reposted from: https://www.cnblogs.com/abc23/p/11020541.html
