A quick NumPy implementation of univariate linear regression (fitting a cubic function)

import numpy as np
import matplotlib.pyplot as plt

learning_rate=15    # tuned by hand over several runs
epochs=1000
# input_features=1
# input_size=1000
# output_features=1
# output_size=1000

w=np.ones((1000,))   # note: one weight per sample, not a single scalar
b=np.ones((1000,))   # likewise, one bias per sample
x=np.random.randn(1000,)
y=x**3               # elementwise cube via broadcasting
print(x.shape,y.shape) 
plt.scatter(x,y)
plt.show()
(1000,) (1000,)

[Figure: scatter plot of the training data x against y=x³]

def getloss(pred,label):
    """
    pred: prediction array of shape (n,)
    label: label array of shape (n,)
    """
    # mean squared error (MSE) loss
    n=len(pred)
    loss=np.sum((pred-label)**2)/n
    return loss
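
As a quick sanity check (a small hand-computed example added here, not part of the original run): for pred=[1,2] and label=[0,0] the MSE is (1²+2²)/2=2.5.

print(getloss(np.array([1.,2.]),np.array([0.,0.])))  # 2.5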

def gradient_descent(init_weight,init_bias,x_train,y_train,epochs,lr):
    w=init_weight
    b=init_bias
    n=len(x_train)
    loss_list=[]
    for epoch in range(epochs):
        if (epoch+1)%50==0:
            print("Epoch {}/{}:".format(epoch+1,epochs))
        # forward pass
        pred=w*x_train+b
        loss=getloss(pred,y_train)
        loss_list.append(loss)
        # backward pass: elementwise MSE gradients (one (w,b) pair per sample)
        grad_w=(pred-y_train)*(2*x_train)/n
        grad_b=(pred-y_train)*(2/n)
        w=w-lr*grad_w
        b=b-lr*grad_b
        if (epoch+1)%50==0:
            print("Loss:{}".format(loss))
    return w,b,loss_list
    

$$\mathrm{loss}=\frac{1}{n}(wx+b-y)^2,\qquad \frac{\partial\,\mathrm{loss}}{\partial w}=\frac{2x}{n}(wx+b-y),\qquad \frac{\partial\,\mathrm{loss}}{\partial b}=\frac{2}{n}(wx+b-y)$$
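
These derivatives can be sanity-checked numerically with a central finite difference. A minimal sketch for a single sample (so n=1; the names eps, f, num_gw, etc. are illustrative, not from the original code):

eps=1e-6
xc,yc=0.7,0.7**3                            # one (x,y) pair from the cubic
wc,bc=1.0,1.0
f=lambda w_,b_:(w_*xc+b_-yc)**2             # single-sample loss (n=1)
num_gw=(f(wc+eps,bc)-f(wc-eps,bc))/(2*eps)  # numerical d(loss)/dw
num_gb=(f(wc,bc+eps)-f(wc,bc-eps))/(2*eps)  # numerical d(loss)/db
ana_gw=2*xc*(wc*xc+bc-yc)                   # analytic 2x(wx+b-y)
ana_gb=2*(wc*xc+bc-yc)                      # analytic 2(wx+b-y)
print(num_gw,ana_gw)                        # the two values should agree closely
print(num_gb,ana_gb)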

w,b,loss=gradient_descent(w,b,x,y,epochs,learning_rate)
loss=np.array(loss)
Epochs=np.array(range(1,epochs+1))
plot_x=np.linspace(-3,3,1000)
prediction=w*plot_x+b   # elementwise: grid point i is paired with (w_i,b_i), so this is not a single line
print(plot_x.shape,prediction.shape)
plt.scatter(plot_x,prediction,c='r')
plt.scatter(x,y)
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
plt.plot(Epochs,loss)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
Epoch 50/1000:
Loss:0.11055148091083385
Epoch 100/1000:
Loss:0.018132030312032036
Epoch 150/1000:
Loss:0.00329229159281977
Epoch 200/1000:
Loss:0.0006270415102713176
Epoch 250/1000:
Loss:0.0001228457862997306
Epoch 300/1000:
Loss:2.4524003490275635e-05
Epoch 350/1000:
Loss:4.961975128571768e-06
Epoch 400/1000:
Loss:1.0140914414182407e-06
Epoch 450/1000:
Loss:2.0886560729968403e-07
Epoch 500/1000:
Loss:4.328373669632778e-08
Epoch 550/1000:
Loss:9.014479488816013e-09
Epoch 600/1000:
Loss:1.8850928814734868e-09
Epoch 650/1000:
Loss:3.9555662091026994e-10
Epoch 700/1000:
Loss:8.324164144205574e-11
Epoch 750/1000:
Loss:1.756104207694759e-11
Epoch 800/1000:
Loss:3.7128507107500646e-12
Epoch 850/1000:
Loss:7.866415353904218e-13
Epoch 900/1000:
Loss:1.6715915268167757e-13
Epoch 950/1000:
Loss:3.5795413567759275e-14
Epoch 1000/1000:
Loss:7.921100683848874e-15
(1000,) (1000,)

[Figure: red prediction points overlaid on the (x, y) training scatter]

[Figure: training loss versus epoch]
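
One caveat worth spelling out: because w and b above are length-1000 arrays, every sample (x_i, y_i) effectively gets its own weight and bias, so gradient descent can drive the loss to nearly zero on any dataset, cubic or not. Conventional univariate linear regression shares a single scalar (w, b) across all samples. A minimal sketch of that shared-parameter variant (the learning rate 0.1 is an assumption chosen for the averaged gradient, not a value from the original post):

w_s,b_s=1.0,1.0
for epoch in range(epochs):
    err=w_s*x+b_s-y
    grad_w=np.mean(2*err*x)   # d(MSE)/dw averaged over all samples
    grad_b=np.mean(2*err)     # d(MSE)/db averaged over all samples
    w_s-=0.1*grad_w
    b_s-=0.1*grad_b
print(w_s,b_s)

Because a single straight line cannot fit y=x³, this variant's MSE plateaus at a positive value; for standard-normal x the least-squares solution is roughly w=3, b=0.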
