Simple Linear Regression: Gradient Descent

We need the five formulas above to perform gradient descent.
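Reconstructed from the code below, the five formulas are the model, the cost function, the two partial derivatives, and the parameter update rule:

$$f_{w,b}(x^{(i)}) = w x^{(i)} + b$$

$$J(w,b) = \frac{1}{2m}\sum_{i=0}^{m-1}\left(f_{w,b}(x^{(i)}) - y^{(i)}\right)^2$$

$$\frac{\partial J(w,b)}{\partial w} = \frac{1}{m}\sum_{i=0}^{m-1}\left(f_{w,b}(x^{(i)}) - y^{(i)}\right)x^{(i)}$$

$$\frac{\partial J(w,b)}{\partial b} = \frac{1}{m}\sum_{i=0}^{m-1}\left(f_{w,b}(x^{(i)}) - y^{(i)}\right)$$

$$w \leftarrow w - \alpha\,\frac{\partial J(w,b)}{\partial w}, \qquad b \leftarrow b - \alpha\,\frac{\partial J(w,b)}{\partial b}$$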

Plotting the linear regression line

import numpy as np
import matplotlib.pyplot as plt

def compute_model_output(x, w, b):
    m = x.shape[0]      # number of training examples
    f_wb = np.zeros(m)  # array of m predictions, initialized to zeros

    for i in range(m):
        f_wb[i] = x[i] * w + b

    return f_wb
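As an aside, the element-wise loop above can also be written with NumPy broadcasting; a minimal sketch of an equivalent (hypothetical) helper, not part of the original code:

def compute_model_output_vectorized(x, w, b):
    # Broadcasting multiplies every element of x by w and adds b in one step
    return w * x + b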

### Compute the value of the cost function
def compute_cost(x, y, w, b):
    m = x.shape[0]
    cost = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost += (f_wb - y[i]) ** 2
    total_cost = 1 / (2 * m) * cost
    return total_cost
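A quick sanity check with made-up numbers (not from the original data): a perfect fit gives zero cost, and any other parameters give a positive cost.

x_check = np.array([1.0, 2.0, 3.0])
y_check = np.array([2.0, 4.0, 6.0])          # exactly y = 2x
print(compute_cost(x_check, y_check, 2, 0))  # 0.0
print(compute_cost(x_check, y_check, 0, 0))  # (4 + 16 + 36) / (2 * 3) ≈ 9.33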

### Compute the gradients of the cost function
def compute_gradient(x, y, w, b):
    m = x.shape[0]
    dj_dw = 0
    dj_db = 0
    for i in range(m):
        f_wb = w * x[i] + b
        dj_dw += (f_wb - y[i]) * x[i]
        dj_db += (f_wb - y[i])
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    return dj_dw, dj_db
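One way to verify compute_gradient is to compare it against a central finite-difference approximation of compute_cost; a small sketch with made-up values:

eps = 1e-6
x_chk = np.array([1.0, 2.0, 3.0])
y_chk = np.array([3.0, 5.0, 7.0])
w0, b0 = 1.5, 0.5
dj_dw, dj_db = compute_gradient(x_chk, y_chk, w0, b0)
# Central differences approximate the same partial derivatives
num_dw = (compute_cost(x_chk, y_chk, w0 + eps, b0) - compute_cost(x_chk, y_chk, w0 - eps, b0)) / (2 * eps)
num_db = (compute_cost(x_chk, y_chk, w0, b0 + eps) - compute_cost(x_chk, y_chk, w0, b0 - eps)) / (2 * eps)
print(dj_dw, num_dw)  # the two values should agree to several decimal places
print(dj_db, num_db)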

### Run gradient descent
def gradient_descent(x, y, w_in, b_in, alpha, num_iters, cost_function, gradient_function):
    w = w_in
    b = b_in
    J_history = []
    p_history = []
    for i in range(num_iters):
        dj_dw, dj_db = gradient_function(x, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
        J = cost_function(x, y, w, b)
        J_history.append(J)
        p_history.append([w, b])
    return w, b, J_history, p_history
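Before moving on to the plotting script, here is how the function can be called on a small made-up dataset (toy numbers and hyperparameters chosen only for illustration); the returned cost history should be decreasing and w should approach the least-squares slope:

x_toy = np.array([1.0, 2.0, 3.0, 4.0])
y_toy = np.array([2.0, 4.1, 6.0, 8.2])
w_toy, b_toy, J_toy, _ = gradient_descent(x_toy, y_toy, 0, 0, 5.0e-2, 10000, compute_cost, compute_gradient)
print(w_toy, b_toy)          # roughly 2.05 with a small intercept
print(J_toy[0], J_toy[-1])   # the cost should drop sharply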

Plotting the results

### Generate random synthetic data for the experiment
num_samples = 100
x_train = np.random.uniform(1, 30, num_samples)
y_train = 100 * x_train + np.random.normal(0, 200, num_samples)  # y = 100*x plus Gaussian noise (sigma = 200)

# Initialize w and b
w_init = 0
b_init = 0

# Set the number of gradient-descent iterations and the learning rate
iters = 100
alpha = 1.0e-4

# Run gradient descent to obtain the final w and b
w_final, b_final, J_hist, p_hist = gradient_descent(x_train, y_train, w_init, b_init, alpha, iters, compute_cost,
                                                    compute_gradient)
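As a sanity check (not part of the original code), the result can be compared with NumPy's closed-form least-squares fit; with only 100 iterations and a small alpha the two need not match exactly, and the intercept in particular converges slowly:

slope_ls, intercept_ls = np.polyfit(x_train, y_train, 1)  # degree-1 fit returns [slope, intercept]
print(f"gradient descent: w = {w_final:.2f}, b = {b_final:.2f}")
print(f"least squares:    w = {slope_ls:.2f}, b = {intercept_ls:.2f}")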

# Create the figure with two subplots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
# Plot the cost as a function of the iteration number
ax1.plot(range(len(J_hist)), J_hist)

# Set the title and axis labels
ax1.set_title("Cost function J")
ax1.set_ylabel('Cost')
ax1.set_xlabel('Iteration')

# The figure is displayed after both subplots are drawn (plt.show() below)


# Find the index of the iteration with the lowest cost
min_J_index = np.argmin(J_hist)
###min_w, min_b = w_final[min_J_index], b_final[min_J_index]
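The commented-out line above would not work as written, because w_final and b_final are scalars; the per-iteration parameters are stored in p_hist instead. A small sketch of how to recover them:

min_w, min_b = p_hist[min_J_index]  # p_hist[i] is the [w, b] pair after iteration i
print(f"lowest cost {J_hist[min_J_index]:.2f} at iteration {min_J_index}: w = {min_w:.2f}, b = {min_b:.2f}")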


tmp_f_wb = compute_model_output(x_train,w_final,b_final)
'''
# Plot the regression line using the trained w and b
plt.plot(x_train,tmp_f_wb,c = 'b',label='Prediction')
plt.scatter(x_train,y_train,marker = 'x',c = 'r',)
plt.ylabel('Prices (RMB)')
plt.xlabel('square_meters')
plt.show()
'''



# Plot the regression line using the trained (lowest-cost) w and b
ax2.scatter(x_train, y_train, color='blue', label='Data')
ax2.plot(x_train, tmp_f_wb, color='red', linewidth=2, label='Linear Regression (min J)')
ax2.set_xlabel('X')
ax2.set_ylabel('y')
ax2.set_title('Linear Regression Fit (with minimum J)')
ax2.legend()
plt.show()

Complete code:

import numpy as np
import matplotlib.pyplot as plt

'''
Run gradient descent to train on the data,
producing a curve of the cost values
and a plot of the fitted regression line.
'''

def compute_model_output(x, w, b):  # compute the model output for the trained w and b
    m = x.shape[0]      # number of training examples
    f_wb = np.zeros(m)  # array of m predictions, initialized to zeros

    for i in range(m):
        f_wb[i] = x[i] * w + b

    return f_wb

### Compute the value of the cost function
def compute_cost(x, y, w, b):
    m = x.shape[0]
    cost = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost += (f_wb - y[i]) ** 2
    total_cost = 1 / (2 * m) * cost
    return total_cost


### Compute the gradients of the cost function
def compute_gradient(x, y, w, b):
    m = x.shape[0]
    dj_dw = 0
    dj_db = 0
    for i in range(m):
        f_wb = w * x[i] + b
        dj_dw += (f_wb - y[i]) * x[i]
        dj_db += (f_wb - y[i])
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    return dj_dw, dj_db

### Run gradient descent
def gradient_descent(x, y, w_in, b_in, alpha, num_iters, cost_function, gradient_function):
    w = w_in
    b = b_in
    J_history = []
    p_history = []
    for i in range(num_iters):
        dj_dw, dj_db = gradient_function(x, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
        J = cost_function(x, y, w, b)
        J_history.append(J)
        p_history.append([w, b])
    return w, b, J_history, p_history


### Generate random synthetic data for the experiment
num_samples = 100
x_train = np.random.uniform(1, 30, num_samples)
y_train = 100 * x_train + np.random.normal(0, 200, num_samples)  # y = 100*x plus Gaussian noise (sigma = 200)

# Initialize w and b
w_init = 0
b_init = 0

# Set the number of gradient-descent iterations and the learning rate
iters = 100
alpha = 1.0e-4

# Run gradient descent to obtain the final w and b
w_final, b_final, J_hist, p_hist = gradient_descent(x_train, y_train, w_init, b_init, alpha, iters, compute_cost,
                                                    compute_gradient)

# Create the figure with two subplots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
# Plot the cost as a function of the iteration number
ax1.plot(range(len(J_hist)), J_hist)

# Set the title and axis labels
ax1.set_title("Cost function J")
ax1.set_ylabel('Cost')
ax1.set_xlabel('Iteration')

# The figure is displayed after both subplots are drawn (plt.show() below)


# Find the index of the iteration with the lowest cost
min_J_index = np.argmin(J_hist)
###min_w, min_b = w_final[min_J_index], b_final[min_J_index]


tmp_f_wb = compute_model_output(x_train,w_final,b_final)
'''
# Plot the regression line using the trained w and b
plt.plot(x_train,tmp_f_wb,c = 'b',label='Prediction')
plt.scatter(x_train,y_train,marker = 'x',c = 'r',)
plt.ylabel('Prices (RMB)')
plt.xlabel('square_meters')
plt.show()
'''



# Plot the regression line using the trained (lowest-cost) w and b
ax2.scatter(x_train, y_train, color='blue', label='Data')
ax2.plot(x_train, tmp_f_wb, color='red', linewidth=2, label='Linear Regression (min J)')
ax2.set_xlabel('X')
ax2.set_ylabel('y')
ax2.set_title('Linear Regression Fit (with minimum J)')
ax2.legend()
plt.show()



 
