3. Linear Regression (Analytic Solution and Gradient Descent)
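Both routes below minimize the squared error $\|Xw - Y\|^2$; the analytic one sets the gradient to zero and solves the resulting normal equation, which is exactly what inv_calculate computes:

$$\nabla_w \|Xw - Y\|^2 = 2X^\top(Xw - Y) = 0 \;\Rightarrow\; w = (X^\top X)^{-1} X^\top Y$$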

import random as rd
import matplotlib.pyplot as plt
import math
import numpy as np

def sample_generate(n, low, sup, w, e):
    # Generate n samples: each row of X holds len(w)-1 features drawn uniformly
    # from [low, sup), plus a trailing 1 for the bias term;
    # Y = w.x plus uniform noise of amplitude e.
    X = [[(sup-low) * rd.random() + low if j < len(w)-1 else 1 for j in range(len(w))] for i in range(n)]
    Y = [sum([w[j]*X[i][j] for j in range(len(w))])+e*(rd.random()-0.5) for i in range(n)]
    return X,Y
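For instance (illustrative numbers of our own choosing), five samples with one feature plus the bias column:

X_demo, Y_demo = sample_generate(n=5, low=0, sup=1, w=[1.5, -0.5], e=0.1)
print(len(X_demo), len(X_demo[0]))   # -> 5 2: five rows, one feature + constant-1 column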

def inv_calculate(X, Y):
    # Analytic least-squares solution via the normal equation:
    # w = (X^T X)^-1 X^T Y
    X = np.mat(X)
    Y = np.mat([[Y[i]] for i in range(len(Y))])
    w = np.linalg.inv(X.T*X)*X.T*Y
    w = w.tolist()
    return [w[i][0] for i in range(len(w))]
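Explicitly inverting X^T X is numerically fragile when features are nearly collinear. As a cross-check, one can solve the same least-squares problem with NumPy's SVD-based solver; lstsq_check below is our own helper, not part of the original script:

def lstsq_check(X, Y):
    # np.linalg.lstsq minimizes ||Xw - Y||^2 via SVD, avoiding the
    # explicit inverse of X^T X
    w, residuals, rank, sv = np.linalg.lstsq(np.asarray(X, dtype=float),
                                             np.asarray(Y, dtype=float),
                                             rcond=None)
    return w.tolist()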

def LR_2_plot(X, Y, k, b):
    # For the two-parameter case (one feature plus a bias):
    # scatter the samples and overlay the fitted line y = k*x + b
    X = [X[i][0] for i in range(len(X))]

    x0, x1 = min(X),max(X)
    y0, y1 = k*x0+b, k*x1+b
    # show the points and the fitted line
    plt.scatter(X, Y)
    plt.plot([x0,x1], [y0,y1], color = 'r')
    plt.show()

def f(w, X, Y):
    # Least-squares objective with the constant Y^T Y dropped (see the
    # expansion below); the line search only compares values, so the
    # constant is irrelevant
    return (w.T*X.T*X*w - 2*Y.T*X*w)[0,0]
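Expanding the squared error shows what f computes: the constant $Y^\top Y$ does not depend on $w$, so dropping it changes no comparison made during the search:

$$\|Xw - Y\|^2 = w^\top X^\top X w - 2\,Y^\top X w + Y^\top Y$$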

def mod(dw):
    # Euclidean norm of the column vector dw
    return math.sqrt(sum([dw[i,0]*dw[i,0] for i in range(dw.size)]))

# r: expansion factor (the bracket grows by a factor of 2-r each time);
# step_min: minimum step length, used as the stopping threshold
def Gradient(X, Y, r = 0.8, step_min = 0.00001):
    d = len(X[0])
    X = np.mat(X)
    Y = np.mat([[Y[i]] for i in range(len(Y))])
    w = np.mat([[0] for i in range(d)])

    while True:
        left = 0
        right = 1
        # dw = X^T(Y - Xw) is half the negative gradient, hence a descent direction
        dw = -X.T * X * w + X.T * Y

        # First bracket the optimal step size by geometric expansion
        while f(w+right*dw, X, Y) < f(w, X, Y):
            right = right*(2-r)

        # Then pin the step down by ternary search: along a descent direction
        # the quadratic objective is unimodal, so the bracket shrinks safely
        mid1 = left*2/3 + right/3
        mid2 = left/3 + right*2/3
        while abs(left - right)*mod(dw) > step_min:
            if f(w+mid1*dw, X, Y) < f(w+mid2*dw, X, Y):
                right = mid2
            else:
                left = mid1
            mid1 = left * 2 / 3 + right / 3
            mid2 = left / 3 + right * 2 / 3

        # Stop once the accepted step is shorter than step_min
        if left*mod(dw) < step_min:
            break
        w = w + left * dw

    w = w.tolist()
    return [w[i][0] for i in range(len(w))]
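Because the objective is quadratic, the line search also has a closed form: along a direction $d$, the minimizer of $f(w + t\,d)$ is

$$t^* = \frac{d^\top d}{d^\top X^\top X\, d},\qquad d = X^\top(Y - Xw).$$

A minimal sketch of that variant (Gradient_exact is our own helper, not part of the original script):

def Gradient_exact(X, Y, tol = 1e-8, max_iter = 10000):
    # Gradient descent with the exact closed-form step for the quadratic objective
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    w = np.zeros(X.shape[1])
    for _ in range(max_iter):
        d = X.T @ (Y - X @ w)        # half the negative gradient
        if math.sqrt(d @ d) < tol:   # gradient nearly zero: converged
            break
        denom = d @ (X.T @ (X @ d))  # d^T X^T X d
        t = (d @ d) / denom          # optimal step along d
        w = w + t * d
    return w.tolist()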


if __name__ == "__main__":

    # y = w^T x; the generating weights are [2, 3, 4] (last entry is the bias)
    X, Y = sample_generate(n = 100, low = 10, sup = 20, w = [2,3,4], e = 1)

    # Analytic solution via matrix inversion: w = (X^T X)^-1 X^T Y
    # w = inv_calculate(X,Y)
    # print("\nk: ", w[0])
    # print("\nb: ", w[1])
    # LR_2_plot(X, Y, w[0], w[1])

    # Gradient descent; the result should come out close to the generating [2, 3, 4]
    w = Gradient(X, Y)
    print("\nw: ", w)
    # print("\nk: ", w[0])
    # print("\nb: ", w[1])
    # LR_2_plot(X, Y, w[0], w[1])

 
