Machine Learning: Solving Linear Regression with Gradient Descent
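The code below fits the weights by batch gradient descent on the squared-error loss. Written out to match the fit method (the gradient is summed over the samples, not averaged), the model, loss, gradients, and update rule are:

\hat{y} = X w + w_0
L(w) = \tfrac{1}{2} \sum_i (y_i - \hat{y}_i)^2
\partial L / \partial w_0 = -\sum_i (y_i - \hat{y}_i), \qquad \partial L / \partial w = -X^\top (y - \hat{y})
w_0 \leftarrow w_0 + \alpha \sum_i (y_i - \hat{y}_i), \qquad w \leftarrow w + \alpha X^\top (y - \hat{y})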

import numpy as np
import pandas as pd
import sklearn.datasets as dataset
import matplotlib.pyplot as plt
import matplotlib as mpl


class LinearRegression:
    def __init__(self, alpha, times):
        """
        alpha:float
            学习率,用来控制步长(权重调整的幅度)
        times:int
            循环迭代次数
        """
        self.alpha = alpha
        self.times = times

    def fit(self, X, y):
        X = np.asarray(X)
        y = np.asarray(y)
        # Weight vector: index 0 is the bias, the rest are feature weights, all initialized to zero.
        self.W_ = np.zeros(1 + X.shape[1])
        # Loss history, one entry per iteration.
        self.loss = []
        # Iterate a fixed number of times, adjusting the weights each pass.
        for i in range(self.times):
            y_hat = np.dot(X, self.W_[1:]) + self.W_[0]
            # Difference between the true values and the predictions.
            error = y - y_hat
            # Record the squared-error loss for this iteration.
            self.loss.append(np.sum(error ** 2) / 2)
            # Gradient step: the loss gradient is -X.T @ error (and -sum(error) for the bias),
            # so descending the loss adds alpha * X.T @ error and alpha * sum(error).
            self.W_[0] += self.alpha * np.sum(error)
            self.W_[1:] += self.alpha * np.dot(X.T, error)

    def predict(self, X):
        """Return predictions for the samples in X using the learned weights."""
        X = np.asarray(X)
        result = np.dot(X, self.W_[1:]) + self.W_[0]
        return result
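A quick sanity check of the class above on a made-up 1-D dataset (a minimal sketch; the data and hyperparameters here are illustrative, not from the original post):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 1))
y = 3 * x[:, 0] + 2 + rng.normal(scale=0.1, size=50)

lr = LinearRegression(alpha=0.001, times=5000)
lr.fit(x, y)
print(lr.W_)  # expected to land near [2, 3]: bias first, then the slope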


class StandardScaler:
    """Standardize each column to zero mean and unit variance (z-score scaling)."""

    def fit(self, X):
        # Learn the per-column mean and standard deviation.
        X = np.asarray(X)
        self.std = np.std(X, axis=0)
        self.mean = np.mean(X, axis=0)

    def transform(self, X):
        # Apply the learned statistics: (x - mean) / std.
        return (X - self.mean) / self.std

    def fit_transform(self, X):
        self.fit(X)
        return self.transform(X)


if __name__ == '__main__':
    # Note: load_boston was removed in scikit-learn 1.2, so this script needs an older release.
    boston = dataset.load_boston()
    col_names = np.append(boston.feature_names, 'MEDV')
    data = pd.DataFrame(data=np.c_[boston.data, boston.target], columns=col_names)

    # Shuffle the rows, then use the first 400 samples for training and the rest for testing.
    data = data.sample(len(data), random_state=0)
    train_x = data.iloc[:400, :-1]
    train_y = data.iloc[:400, -1]
    test_x = data.iloc[400:, :-1]
    test_y = data.iloc[400:, -1]

    # Standardize the features with statistics computed on the training set only.
    S = StandardScaler()
    train_x = S.fit_transform(train_x)
    test_x = S.transform(test_x)

    # Standardize the target the same way, so the model trains and predicts on the standardized scale.
    S1 = StandardScaler()
    train_y = S1.fit_transform(train_y)
    test_y = S1.transform(test_y)
    # The gradient sums over all training samples (no 1/m averaging), hence the very small learning rate.
    LR = LinearRegression(alpha=0.00005, times=100000)
    # print(train_y)
    LR.fit(train_x, train_y)

    result = LR.predict(test_x)
    # Mean squared error on the standardized target scale.
    print(np.mean((result - test_y) ** 2))
    # SimHei is a CJK font, kept from the original post where the labels were in Chinese.
    mpl.rcParams['font.family'] = 'SimHei'
    mpl.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(10, 10))
    plt.plot(result, 'ro-', label='Predicted')
    plt.plot(test_y.values, 'go--', label='Actual')
    plt.title('Linear regression prediction (gradient descent)')
    plt.xlabel('Sample index')
    plt.ylabel('House price')
    plt.legend()
    plt.show()
    # Loss history, one value per iteration (this prints all 100000 entries).
    print(LR.loss)
    # Standalone demo of the custom StandardScaler on a small array.
    # Note that this re-fits S1 and overwrites the statistics learned from train_y.
    m_test = np.array([[1, 400, 3000], [2, 200, 4000]])
    print(m_test)
    print(S1.fit_transform(m_test))
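Because the target was standardized with S1, both the predictions and the printed MSE are on the standardized scale. A minimal sketch of mapping them back to the original MEDV units (variable names are illustrative; it must run before the m_test demo above re-fits S1, since it relies on the mean and std learned from train_y):

    # Undo the z-score scaling: original = scaled * std + mean.
    pred_price = result * S1.std + S1.mean
    true_price = test_y.values * S1.std + S1.mean
    # MSE in the original MEDV units.
    print(np.mean((pred_price - true_price) ** 2))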