# 3.1.2 随机梯度下降法 (stochastic gradient descent)

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Path to the advertising dataset (feature columns + a sales column).
for_mat = 'Advertising.csv'
advertising = pd.read_csv(for_mat)
# NOTE(review): the .head() result is discarded — presumably a leftover
# from notebook use; it has no effect when run as a script.
advertising.head()
# Toy one-dimensional sample: feature value x and target y, read by
# plo_t() and the gradient-descent demo below.
x, y = 0.5, 0.8

"""绘制RSS关于w的函数图像"""


def plo_t():
    """Plot RSS(w) for the toy sample and mark the point w = 3.

    Uses the module-level sample ``x`` (feature) and ``y`` (target) with
    the model ``pred = x * w`` and ``RSS(w) = (pred - y) ** 2 / 2``.
    Draws the RSS curve, a scatter point at w = 3, and the tangent line
    of RSS at w = 3 (slope = gradient at that point).
    """
    w = 3

    pred = x * w
    rss = ((pred - y) ** 2) / 2
    grad = (pred - y) * x
    print("当w=3时,预测值为" + str(pred))
    print("当w=3时,残差平方和为" + str(rss))
    print("当w=3时,RSS(w)的梯度为" + str(grad))

    # RSS over a range of weights, vectorized instead of a Python
    # append loop.
    w_vec = np.linspace(-1, 4, 100)
    rss_vec = (y - x * w_vec) ** 2 / 2

    # Curve of RSS as a function of the weight.
    plt.plot(w_vec, rss_vec)
    # Scatter point at (w, RSS(w)) for w = 3.
    plt.scatter(w, rss, s=100, c="y", marker="o")
    # Tangent line at w = 3, computed from the gradient and passing
    # through (w, rss) — replaces the previously hard-coded constants
    # 0.35 and -0.805, which were only valid for x=0.5, y=0.8, w=3.
    tangent_w = np.linspace(2.5, 3.5, 50)
    plt.plot(tangent_w, grad * (tangent_w - w) + rss, "--", linewidth=2.0)
    plt.xlabel("w", fontsize=16)
    plt.ylabel("RSS", fontsize=16)
    plt.show()


# plo_t() #调用绘制函数

"""算法3.1  梯度下降算法"""
w = 0
lr = 0.5
pred = x * w
loss = ((pred - y) ** 2) / 2
grad = (pred - y) * x
print("自变量的值:" + str(x))
print("真实因变量:" + str(y))
print("初始权重:" + str(w))
print("初始预测值:" + str(pred))
print("初始误差:" + str(loss))
print("初始梯度:" + str(grad))
"""
定义迭代函数
更新迭代后的预测值,预测误差,梯度
"""


def ite_re(w_, lr_, grad_, count):
    """Run ``count`` gradient-descent updates of the weight.

    Each iteration applies ``w <- w - lr * grad`` and then recomputes
    the prediction, loss, and gradient from the module-level sample
    (``x``, ``y``), printing all of them.

    Parameters
    ----------
    w_ : float
        Initial weight.
    lr_ : float
        Learning rate.
    grad_ : float
        Gradient of the loss at the initial weight.
    count : number of updates to perform (coerced to ``int``).

    Returns
    -------
    float
        The weight after the final update.
    """
    count = int(count)
    # range(1, count + 1) performs exactly ``count`` updates; the
    # previous range(1, count) stopped one short (count - 1 updates),
    # so "iterate 20 times" only ran 19 updates.
    for i in range(1, count + 1):
        w_ = w_ - lr_ * grad_
        pred_ = x * w_
        loss_ = ((pred_ - y) ** 2) / 2
        grad_ = (pred_ - y) * x
        print(f"第{i}次更新后的权重:" + str(w_))
        print(f"第{i}次更新后的预测值:" + str(pred_))
        print(f"第{i}次更新后的误差:" + str(loss_))
        print(f"第{i}次更新后的梯度:" + str(grad_))
        print("\n\n")
    return w_


# ite_re(w, lr, grad, 20)  # 迭代更新20次
"""算法3.2       使用随机梯度下降法迭代更新w"""


def a():
    """Stochastic gradient descent for linear regression on the ad data.

    Takes the first two feature columns and column 3 of the module-level
    ``advertising`` DataFrame, standardizes the features and centers the
    target (so no intercept is needed), then runs 5 epochs of per-sample
    SGD with learning rate 0.1. Prints the mean loss after each epoch
    and the final weight vector.
    """
    x_ = advertising.iloc[:, 0:2].values
    y_ = advertising.iloc[:, 3].values
    # Column-wise standardization of the features; centering of the target.
    scaled_x = (x_ - np.mean(x_, axis=0, keepdims=True)) / np.std(x_, axis=0, keepdims=True)
    centered_y = y_ - np.mean(y_)
    lr_ = 0.1
    w_ = np.zeros(2)
    n = len(scaled_x)
    # Trajectory of the weights: initial value plus one entry per update.
    w_record = [w_.copy()]
    for epoch in range(5):
        total_loss = 0.0
        for i in range(n):
            pred_ = np.sum(scaled_x[i] * w_)
            total_loss += ((pred_ - centered_y[i]) ** 2) / 2
            delta = pred_ - centered_y[i]
            # Per-sample SGD step on the single example i.
            w_ -= lr_ * (delta * scaled_x[i])
            w_record.append(w_.copy())
        # Mean loss over the epoch; uses the explicit sample count ``n``
        # instead of relying on the inner loop variable ``i`` leaking
        # out of its loop (fragile, and wrong if the dataset is empty).
        c = total_loss / n
        print(c)
    print(w_)


a()

# (CSDN web-page boilerplate — comment/donation-widget text captured by the
# scrape — removed; it was not part of the source code.)