线性回归小实战
#房价预测
import numpy as np
import random
import matplotlib_inline.backend_inline
import matplotlib.pyplot as plt
import torch

#设计房价数据集
# Build a synthetic housing-price dataset:
#   price = 2 * x1 - 3.4 * x2 + 4.2 + Gaussian noise
samples = 1000   # number of generated examples
features = 2     # number of input features per example
w_0 = [2, -3.4]  # ground-truth weights
b_0 = 4.2        # ground-truth bias
# Feature matrix drawn from a standard normal distribution, shape (samples, features).
data = torch.from_numpy(np.random.normal(0, 1, (samples, features))).float()
# Observation noise with std 0.1, shape (samples,).
noise = torch.from_numpy(np.random.normal(0, 0.1, (samples,))).float()
# True linear relation plus noise; matmul((N, 2), (2,)) -> (N,) labels.
labels = torch.matmul(data, torch.tensor(w_0)) + b_0 + noise
print(data.size(),labels.size())

#进行图像散点图绘制
def get_scatter(figure_size, x, y):
    """Render a red scatter plot of (x, y) inline as an SVG figure.

    Args:
        figure_size: [width, height] in inches for the matplotlib figure.
        x, y: equal-length 1-D sequences/tensors to plot.
    """
    # SVG keeps the inline notebook plot sharp at any zoom level.
    matplotlib_inline.backend_inline.set_matplotlib_formats('svg')
    plt.rcParams['figure.figsize'] = figure_size
    plt.scatter(x, y, c='r', s=1)
    plt.show()

# Visualize how the second feature relates to the label (house price).
get_scatter([3.5,2.5],data[:,1],labels)

#加载数据集
def iter_data(batchsize, data, labels):
    """Yield shuffled minibatches of (data, labels).

    Fixes two defects in the original implementation:
    - it built the index from the module-level global ``samples`` instead of
      ``len(data)``, which broke for any dataset of a different size;
    - ``int(len(index)/batchsize)`` silently dropped the final partial batch,
      making the ``min(...)`` clamp in the slice dead code.

    Args:
        batchsize: number of examples per minibatch.
        data: tensor of shape (N, features).
        labels: tensor of shape (N,).

    Yields:
        (batch_data, batch_labels) tensor pairs in random order; the last
        batch may be smaller than ``batchsize`` if N is not divisible by it.
    """
    index = list(range(len(data)))
    random.shuffle(index)
    for start in range(0, len(index), batchsize):
        # Slicing already clamps at the end of the list, so the final
        # partial batch is yielded naturally.
        j = torch.LongTensor(index[start:start + batchsize])
        yield data.index_select(0, j), labels.index_select(0, j)
#初始化网络
def init(n_features=2):
    """Initialize linear-regression parameters for SGD training.

    Generalized from the original, which read the module-level ``features``
    global; the default of 2 matches the synthetic dataset in this script,
    so existing ``init()`` calls behave identically.

    Args:
        n_features: number of input features (columns of the data matrix).

    Returns:
        (w, b): weight tensor of shape (n_features, 1) drawn from N(0, 0.1)
        and a scalar zero bias, both with ``requires_grad=True``.
    """
    w = torch.tensor(np.random.normal(0, 0.1, (n_features, 1)),
                     requires_grad=True, dtype=torch.float32)
    b = torch.zeros(1, requires_grad=True, dtype=torch.float32)
    return w, b

#BP算法,回归,反向更新梯度
def forward(x, w, b):
    """Linear model forward pass.

    Computes x @ w + b: for x of shape (N, features) and w of shape
    (features, 1), returns predictions of shape (N, 1).
    """
    return torch.matmul(x, w) + b

def loss(y_hat, y):
    """Per-sample halved squared error: (y_hat - y)**2 / 2.

    ``y`` is reshaped to ``y_hat``'s shape first so a (N,) label vector
    lines up element-wise with (N, 1) predictions instead of broadcasting
    into an (N, N) matrix.
    """
    diff = y_hat - y.view(y_hat.size())
    return diff ** 2 / 2

def Parms_grad(Parms, lr, batch_size):
    """Minibatch SGD step: p -= lr * p.grad / batch_size for each parameter.

    Updates ``.data`` in place so autograd does not record the step.
    Note: ``param.grad`` is an attribute, not a method — calling ``grad()``
    raises ``TypeError: 'Tensor' object is not callable``.
    """
    scale = lr / batch_size  # hoisted: identical for every parameter
    for p in Parms:
        p.data -= p.grad * scale

if __name__ == '__main__':
    epochs = 10
    lr = 0.01
    batchsize = 10
    # Initialize parameters w (features x 1) and b.
    w,b = init()
    print(w,b)
    for epoch in range(epochs):
        loss_epoch = 0.0  # running sum of per-sample losses this epoch
        for (X,y) in iter_data(batchsize,data,labels):
            # Forward pass.
            y_hat = forward(X,w,b)
            # Sum of per-sample losses over this minibatch.
            l = loss(y_hat,y).sum()
            # Backward pass accumulates gradients into w.grad / b.grad.
            l.backward()
            Parms_grad([w,b],lr,batchsize)
            # Clear gradients so the next batch starts from zero.
            w.grad.data.zero_()
            b.grad.data.zero_()
            # BUG FIX: the original did `loss_epoch += l`, which keeps each
            # batch's autograd graph alive; .item() extracts a plain float.
            loss_epoch += l.item()
        # BUG FIX: the original divided the epoch sum by `batchsize`; the
        # mean per-sample loss divides by the number of training examples.
        train_loss = loss_epoch/len(data)
        print("epoch{}  train_loss:{}".format(epoch,train_loss))
    print('w,b:',w,b)

房价分布关系散点图(由上文 get_scatter 绘制)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值