Linear Regression Summary

1. Linear regression with plain formulas

import matplotlib.pyplot as plt
import numpy as np
import itertools

data = np.array([[80,200],
                [95,230],
                [104,245],
                [112,247],
                [125,259],
                [135,262]])
# com_list = list(itertools.combinations(data,2))  # combinations(iterable, r): every way to pick r items; list() converts to a list
# x = data[:,0]  # [all rows, column 0]
# y = data[:,1]  # [all rows, column 1]
#
# ms = []
# bs = []

# for comlist in com_list:
#     x1,y1 = comlist[0]
#     x2,y2 = comlist[1]
#     print(x1,y1)
#     print(x2,y2)
#     m = (y2-y1)/(x2-x1)
#     b = y1-m*x1
#     ms.append(m)  # append adds m to the end of ms
#     bs.append(b)
# m,b = np.mean(ms),np.mean(bs)  # mean computes the average
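# Note: itertools.combinations(data, 2) yields every unordered pair of the six
# points, i.e. C(6,2) = 15 pairs; each pair determines one exact line, and the
# estimate above is simply the average of those 15 slopes and intercepts.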


#x = 140
#predict_fx = m*x+b
#print(f'f(x)={predict_fx} when x=140')  # f'...' is an f-string (formatted string literal)


#compute the mean squared error (MSE)
# losses=[]
# for x,y in data:
#     predict_fx = m*x+b
#     loss = (y-predict_fx)**2
#     losses.append(loss)
# print(losses)
# print(np.mean(losses))  # the MSE is the mean of all squared errors
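# Written out, the quantity computed above is
#   MSE = (1/N) * Σ (yi - (m*xi + b))²
# averaged over the N = 6 data points.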

#Gradient descent
#Prediction model: f(x) = m*x + b
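# Differentiating the MSE loss L(m,b) = (1/N) Σ (m*xi + b - yi)² gives the
# gradients accumulated in gradient_descent() below:
#   ∂L/∂m = (2/N) Σ (m*xi + b - yi) * xi
#   ∂L/∂b = (2/N) Σ (m*xi + b - yi)
# Each step then moves (m, b) against the gradient, scaled by lr.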

m = 1
b = 1
#learning rate
lr = 0.00001
def gradient_descent(m, b, data, lr):
    # loss is the mean squared error; mpd and bpd are the partial derivatives
    # of the loss with respect to m and b
    loss, mpd, bpd = 0, 0, 0
    for xi, yi in data:
        loss += (m*xi+b-yi)**2
        bpd += (m*xi+b-yi)*2
        mpd += (m*xi+b-yi)*2*xi
    #update m and b
    N = len(data)
    loss = loss/N
    mpd = mpd/N
    bpd = bpd/N
    m = m-mpd*lr
    b = b-bpd*lr
    return loss, m, b

for epoch in range(3000000):
    mse, m, b = gradient_descent(m,b,data,lr)
    if epoch%100000 == 0:
        print(f"loss={mse:.4f},m={m:.4f},b={b:.4f}")

2. The PyTorch way: create the parameters with requires_grad=True, which tells autograd to compute a gradient for that Tensor during backpropagation; the gradient lands in its .grad attribute

import torch
import numpy as np
from matplotlib import pyplot as plt



m1 = torch.randn(1, requires_grad=True)
b1 = torch.rand(1, requires_grad=True)  # b1 also needs requires_grad=True, or b1.grad stays None
print(m1)


def forward(x):
    # prediction model: f(x) = m1*x + b1
    return m1*x + b1

data = [2, 5]  # a single training point: x=2, y=5
x = data[0]
y = data[1]


predict = forward(x)
loss = (y - predict)**2  # squared error for this single point
loss.backward()          # populates m1.grad and b1.grad
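
After loss.backward(), the gradients sit in m1.grad and b1.grad, but nothing has been updated yet. Below is a minimal sketch of how the gradient-descent loop could continue from here (the lr value and epoch count are arbitrary choices, not from the original):

# clear the gradients left by the one-off backward() above
m1.grad.zero_()
b1.grad.zero_()

lr = 0.01
for epoch in range(500):
    predict = forward(x)
    loss = (y - predict)**2
    loss.backward()            # accumulates d(loss)/d(m1) and d(loss)/d(b1) into .grad
    with torch.no_grad():      # parameter updates must not be tracked by autograd
        m1 -= lr * m1.grad
        b1 -= lr * b1.grad
    m1.grad.zero_()            # .grad accumulates across backward() calls, so reset it each step
    b1.grad.zero_()
    if epoch % 100 == 0:
        print(f'loss={loss.item():.6f}, m={m1.item():.4f}, b={b1.item():.4f}')

This hand-rolled update is exactly what torch.optim.SGD (without momentum) automates.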