PyTorch Linear Regression

import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random

# generate a synthetic dataset
num_inputs = 2
num_examples = 1000

true_w = [2, -3.4]
true_b = 3

features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)

# plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
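
# Each label follows true_w[0]*x1 + true_w[1]*x2 + true_b plus N(0, 0.01) noise,
# so training should recover parameters close to [2, -3.4] and 3.
# Quick sanity check on the first example (a small sketch reusing the tensors above):
print(features[0], labels[0])
print(true_w[0] * features[0, 0] + true_w[1] * features[0, 1] + true_b)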

def data_iter(batch_size, features, labels):
    # yield random minibatches of (features, labels)
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit the examples in random order
    for i in range(0, num_examples, batch_size):
        # the last batch may be smaller than batch_size
        j = torch.LongTensor(indices[i:min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)


batch_size = 10
# for x, y in data_iter(batch_size, features, labels):
#     print(x, '\n', y)
#     break


# init model parameters
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
# track gradients so autograd can differentiate the loss w.r.t. w and b
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

def linreg(x, w, b):
    # linear regression model: y_hat = x @ w + b
    return torch.mm(x, w) + b

def squared_loss(y_hat, y):
    # reshape y to y_hat's shape to avoid broadcasting surprises
    return (y_hat - y.view(y_hat.size())) ** 2 / 2

def sgd(params, lr, batch_size):
    # minibatch SGD: the loss is summed over the batch, so divide by batch_size
    for param in params:
        param.data -= lr * param.grad / batch_size
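
# The training loop below sums the per-example losses before calling backward(),
# so sgd() divides each gradient by batch_size to take an average-gradient step.
# A hypothetical equivalent for a loss reduced with .mean() instead of .sum()
# (sketch only, not used by this script; sgd_mean_loss is an illustrative name):
def sgd_mean_loss(params, lr):
    for param in params:
        param.data -= lr * param.grad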

# hyperparameters
lr = 0.03
num_epochs = 5
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for x, y in data_iter(batch_size, features, labels):
        l = loss(net(x, w, b), y).sum()  # summed loss over the minibatch
        l.backward()                     # compute gradients of l w.r.t. w and b
        sgd([w, b], lr, batch_size)      # parameter update
        # clear the gradients before the next backward pass
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print(f'epoch: {epoch + 1}, loss: {train_l.mean().item()}')

# the learned parameters should be close to the ground-truth values
print(w, true_w, b, true_b)
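
The script above builds every piece by hand. The second version below trains the same model with PyTorch's built-in utilities: torch.utils.data.DataLoader for minibatching, nn.Linear for the model, nn.MSELoss for the loss, and optim.SGD for the parameter update.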

import torch
from torch import nn
import numpy as np
import torch.utils.data as Data
from torch.nn import init
import torch.optim as optim
torch.manual_seed(1)  # fix the random seed for reproducibility

# guard required because the DataLoader below uses worker subprocesses (num_workers > 0)
if __name__ == '__main__':
    num_inputs = 2
    num_examples = 1000
    true_w = [2, -3.4]
    true_b = 3.2
    features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)

    batch_size = 10
    dataset = Data.TensorDataset(features, labels)
    data_iter = Data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,    # reshuffle the data every epoch
        num_workers=2,   # read minibatches in two worker processes
    )

    # for x, y in data_iter:
    #     print(x, '\n', y)
    #     break
    # the model as an nn.Module subclass: a single linear layer
    class LinearNet(nn.Module):
        def __init__(self, n_feature):
            super(LinearNet, self).__init__()
            self.linear = nn.Linear(n_feature, 1)

        def forward(self, x):
            y = self.linear(x)
            return y


    net = LinearNet(num_inputs)
    print(net)

    # the same model assembled with nn.Sequential (this replaces the LinearNet instance above)
    net = nn.Sequential(
        nn.Linear(num_inputs, 1)
    )
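
    # nn.Sequential is just one way to assemble this single-layer network.
    # Two other standard constructions, shown only for comparison
    # (net_alt and net_alt2 are illustrative names, not used below):
    from collections import OrderedDict
    net_alt = nn.Sequential(OrderedDict([
        ('linear', nn.Linear(num_inputs, 1)),
    ]))
    net_alt2 = nn.Sequential()
    net_alt2.add_module('linear', nn.Linear(num_inputs, 1))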
    # print(net)
    # print('first layer:', net[0])

    # initialize the weight from N(0, 0.01) and the bias to 0
    init.normal_(net[0].weight, mean=0.0, std=0.01)
    init.constant_(net[0].bias, val=0.0)


    loss = nn.MSELoss()                               # mean squared error loss
    optimizer = optim.SGD(net.parameters(), lr=0.03)  # minibatch SGD with lr = 0.03
    # print(optimizer)
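    # The learning rate stays at 0.03 for the whole run. If it needed adjusting
    # mid-training, each parameter group exposes its own 'lr' entry
    # (sketch only, not executed here so the run above is unchanged):
    # for param_group in optimizer.param_groups:
    #     param_group['lr'] *= 0.1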
    num_epochs = 3
    for epoch in range(1, num_epochs+1):
        for x, y in data_iter:
            output = net(x)
            l = loss(output, y.view(-1, 1))  # reshape y to (batch, 1) to match output
            optimizer.zero_grad()            # clear gradients from the previous step
            l.backward()
            optimizer.step()                 # apply the SGD update
        print(f'epoch: {epoch}, loss: {l.item()}')  # loss of the last minibatch
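    # The loss printed above is only the last minibatch's. A full-dataset loss
    # after training (a small sketch reusing the tensors defined above):
    with torch.no_grad():
        print('full-dataset loss:', loss(net(features), labels.view(-1, 1)).item())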

    # the learned weight and bias should be close to true_w and true_b
    dense = net[0]
    print(true_w, dense.weight.data)
    print(true_b, dense.bias.data)
