1. Implementation from Scratch
1.1 Import the Required Libraries
import torch
import random
import numpy as np
1.2 Generate the Dataset and Labels
num_inputs = 2
num_examples = 1000
true_w = [2, -4.5]
true_b = 2.6
features = torch.from_numpy(np.random.normal(0, 1, (num_examples, num_inputs)))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.from_numpy(np.random.normal(0, 0.01, size=labels.size()))  # add Gaussian noise
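To sanity-check the synthetic data, it helps to look at one sample; a minimal sketch (the printed values vary with the random draw):

print(features[0], labels[0])  # one feature pair and its noisy label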
1.3 Initialize the Model Parameters
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float64)
b = torch.zeros(1, dtype=torch.float64)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
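At this point autograd is set to track both parameters, but no gradients exist yet since no backward pass has run; a quick check:

print(w.requires_grad, b.requires_grad)  # True True
print(w.grad, b.grad)                    # None None until the first backward()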
1.4 Read the Data in Mini-batches
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the samples in random order
    for i in range(0, num_examples, batch_size):
        # the last batch may be smaller than batch_size
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)
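To see data_iter in action, read one mini-batch and inspect its shapes; a minimal sketch (the sampled rows vary with the shuffle):

for X, y in data_iter(10, features, labels):
    print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10])
    break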
1.5 Define the Model, Loss Function, and Optimization Algorithm
def linreg(X, w, b):
    # linear regression model: Xw + b
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):
    # squared loss; reshape y to the shape of y_hat before subtracting
    return (y_hat - y.view(y_hat.size())) ** 2 / 2

def sgd(params, lr, batch_size):
    # mini-batch stochastic gradient descent; update .data so autograd does not track it
    for param in params:
        param.data -= lr * param.grad / batch_size
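The y.view(y_hat.size()) in squared_loss is not cosmetic: linreg returns y_hat with shape (batch_size, 1) while y has shape (batch_size,), so subtracting them directly would broadcast to a (batch_size, batch_size) matrix. A minimal sketch of the trap:

y_hat = torch.ones(3, 1)
y = torch.zeros(3)
print((y_hat - y).shape)                     # torch.Size([3, 3]): silent broadcasting
print((y_hat - y.view(y_hat.size())).shape)  # torch.Size([3, 1]): intended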
1.6 Train the Model
lr = 0.03
batch_size = 10
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    # Training takes num_epochs full passes over the data. In each epoch, every
    # sample in the training set is used exactly once (assuming the number of
    # samples is divisible by the batch size). X and y are the features and
    # labels of one mini-batch.
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # loss on the mini-batch
        l.backward()                     # compute gradients w.r.t. w and b
        sgd([w, b], lr, batch_size)      # update the parameters
        w.grad.data.zero_()              # reset gradients: PyTorch accumulates them
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
out:
epoch 1, loss 0.038098
epoch 2, loss 0.000144
epoch 3, loss 0.000047
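The explicit w.grad.data.zero_() and b.grad.data.zero_() calls matter because PyTorch accumulates gradients across backward passes; without them, each sgd step would use the sum of all previous gradients. A minimal sketch of the accumulation:

t = torch.ones(1, requires_grad=True)
(2 * t).backward()
(2 * t).backward()
print(t.grad)  # tensor([4.]): two backward passes summed, not tensor([2.])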
1.7 Results
print(true_w, '\n', w)
print(true_b, '\n', b)
out:
[2, -4.5]
tensor([[ 2.0006],
[-4.4990]], dtype=torch.float64, requires_grad=True)
2.6
tensor([2.5990], dtype=torch.float64, requires_grad=True)
2. Concise Implementation
import torch.utils.data as Data
from torch.nn import init
import torch.optim as optim
from torch import nn
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
batch_size = 10
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
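DataLoader replaces the hand-written data_iter from Section 1.4; reading one mini-batch confirms the shapes (a minimal sketch):

X, y = next(iter(data_iter))
print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10])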
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Sequential(
            nn.Linear(n_feature, 1)
        )

    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
init.normal_(net.linear[0].weight, mean=0, std=0.01)
init.constant_(net.linear[0].bias, val=0)
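Printing the network and its parameters is a quick way to confirm the structure and the initialization; a minimal sketch:

print(net)  # LinearNet wrapping a Sequential with a single Linear(2, 1)
for name, param in net.named_parameters():
    print(name, param.shape)  # linear.0.weight torch.Size([1, 2]), linear.0.bias torch.Size([1])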
loss = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.03)
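Note that nn.MSELoss averages over the batch by default (reduction='mean') and has no 1/2 factor, so unlike the scratch version there is no manual division by batch_size in the update; a minimal check:

print(nn.MSELoss()(torch.ones(2, 1), torch.zeros(2, 1)))  # tensor(1.), the mean squared error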
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))  # reshape y to (batch_size, 1) to match output
        optimizer.zero_grad()            # clear accumulated gradients
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
out:
epoch 1, loss: 0.000319
epoch 2, loss: 0.000089
epoch 3, loss: 0.000087
dense = net.linear[0]
print(true_w, dense.weight)
print(true_b, dense.bias)
out:
[2, -3.4] Parameter containing:
tensor([[ 2.0006, -3.3997]], requires_grad=True)
4.2 Parameter containing:
tensor([4.1997], requires_grad=True)
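To compare the learned parameters with the generating ones without the Parameter wrapper, they can be detached first (values vary slightly from run to run):

print(dense.weight.detach().view(-1))  # close to tensor([ 2.0000, -3.4000])
print(dense.bias.detach())             # close to tensor([4.2000])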