Linear Regression from Scratch

I. Contents

  1. Linear regression implementation
  2. Two-dimensional linear regression

II. Implementation

  1. Linear regression implementation


'''Linear regression from scratch (single input feature)'''
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader

seed=10
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic=True
torch.backends.cudnn.benchmark=False


class Model(nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.linear=nn.Linear(1,1)  # one input feature -> one output

    def forward(self,x):
        return self.linear(x)


class MyDataset(Dataset):
    def __init__(self,x,y):
        super(MyDataset,self).__init__()
        self.x=x
        self.y=y
    def __getitem__(self, item):
        return self.x[item],self.y[item]

    def __len__(self):
        return self.x.shape[0]


def train(train_loader,model):
    loss_fn=nn.MSELoss()
    optimizer=torch.optim.Adam(model.parameters(),lr=0.03)
    step=0
    for _epoch in range(100):
        model.train()
        for batch in train_loader:
            x,y=batch[0],batch[1]
            x,y=x.to("cpu"),y.to("cpu")
            x=x.unsqueeze(-1)  # (batch,) -> (batch, 1) to match nn.Linear(1, 1)

            pred=model(x)
            loss=loss_fn(pred,y.view(-1,1)).mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step%10==0:
                print(f"_epoch:{_epoch}  step:{step}  loss {loss}")
            step+=1



if __name__ == '__main__':
    # torch.range is deprecated; use arange and keep the inputs float for nn.Linear
    x=torch.arange(-100,101,2,dtype=torch.float32)
    y=x*5+torch.randint(-10,10,size=x.shape)  # targets y = 5x plus integer noise
    plt.scatter(x.tolist(),y.tolist())
    mydatasets=MyDataset(x,y)
    train_loader=DataLoader(mydatasets,batch_size=8,num_workers=2,shuffle=True,drop_last=True)

    model=Model().to("cpu")

    train(train_loader,model)

    print(model.linear.weight.detach().numpy(),model.linear.bias.detach().numpy())
    model=model.eval()
    with torch.no_grad():
        x=x.to("cpu").reshape(-1,1)
        pred=model(x)
        pred=pred.squeeze(-1)
        pred=pred.tolist()
        x=x.tolist()
        plt.plot(x,pred)
    plt.show()
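
As a quick cross-check (not part of the original code), the one-dimensional fit can also be computed in closed form with least squares. A minimal sketch, assuming a PyTorch version that provides torch.linalg.lstsq and regenerating the same data as above:

# Closed-form least-squares check (sketch; regenerates the data from the script above).
import torch

x = torch.arange(-100, 101, 2, dtype=torch.float32)
y = x * 5 + torch.randint(-10, 10, size=x.shape)
# Stack a column of ones so the solver also estimates the bias term.
X_aug = torch.stack([x, torch.ones_like(x)], dim=1)   # shape (101, 2)
solution = torch.linalg.lstsq(X_aug, y.unsqueeze(1)).solution
print("closed-form weight:", solution[0].item(), "bias:", solution[1].item())

The weight and bias printed after gradient training should land close to these closed-form values (weight near 5).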

  2. Two-dimensional linear regression


'''Linear regression from scratch (two input features)'''
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader

seed=10
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic=True
torch.backends.cudnn.benchmark=False


class Model(nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.linear=nn.Linear(2,1)  # two input features -> one output

    def forward(self,x):

        return self.linear(x)


class MyDataset(Dataset):
    def __init__(self,x,y):
        super(MyDataset,self).__init__()
        self.x=x
        self.y=y
    def __getitem__(self, item):
        return self.x[item],self.y[item]

    def __len__(self):
        return self.x.shape[0]


def train(train_loader,model):
    loss_fn=nn.MSELoss()
    optimizer=torch.optim.SGD(model.parameters(),lr=0.03)
    step=0
    for _epoch in range(8):
        model.train()
        for batch in train_loader:
            x,y=batch[0],batch[1]

            x,y=x.to("cpu"),y.to("cpu")

            pred=model(x)
            loss=loss_fn(pred,y.view(-1,1)).mean()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step%10==0:
                print(f"_epoch:{_epoch}  step:{step}  loss {loss}")
            step+=1



if __name__ == '__main__':

    num_inputs = 2
    num_example = 1000
    # generate the dataset
    true_w = [4, 5.2]
    true_b = [5]
    features = torch.rand(num_example, num_inputs, dtype=torch.float32)
    print(features.shape)
    labels = features[:, 0] * true_w[0] + features[:, 1] * true_w[1] + true_b[0]
    print(features.size(), labels.size())
    x=features
    y=labels
    #plt.scatter(x.tolist(),y.tolist())

    mydatasets=MyDataset(x,y)
    train_loader=DataLoader(mydatasets,batch_size=8,num_workers=2,shuffle=True,drop_last=True)

    model=Model().to("cpu")

    train(train_loader,model)

    print(model.linear.weight.detach().numpy(),model.linear.bias.detach().numpy())
    model=model.eval()
    with torch.no_grad():
        x=x.to("cpu")
        pred=model(x)
        pred=pred.squeeze(-1)
        pred=pred.tolist()
        x=x.tolist()
    #     plt.plot(x,pred)
    # plt.show()
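
Since the two-dimensional labels are generated without noise, the trained layer should come close to true_w and true_b. A minimal sanity check, sketched under the assumption that model, features, labels, true_w and true_b from the script above are still in scope:

# Compare the learned parameters with the generating ones (sketch; run after train()).
w_learned = model.linear.weight.detach().squeeze(0)   # shape (2,)
b_learned = model.linear.bias.detach()                # shape (1,)
print("weight error:", (w_learned - torch.tensor(true_w)).abs().max().item())
print("bias error:", (b_learned - torch.tensor(true_b, dtype=torch.float32)).abs().item())
with torch.no_grad():
    mse = torch.mean((model(features).squeeze(-1) - labels) ** 2).item()
print("training MSE:", mse)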









