Chapter 1: Linear Regression (PyTorch Implementation)
Linear regression; softmax and classification models; multilayer perceptron
import torch
from torch import nn
import numpy as np
torch.manual_seed(1)
print(torch.__version__)
torch.set_default_tensor_type('torch.FloatTensor')
Generating the Dataset
Generating the dataset here is exactly the same as in the from-scratch implementation.
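Concretely, the features are drawn from a standard normal distribution and the labels follow the linear model with Gaussian noise, matching the code below:

$$\mathbf{y} = \mathbf{X}\mathbf{w} + b + \epsilon, \qquad \mathbf{w} = [2, -3.4]^\top,\quad b = 4.2,\quad \epsilon \sim \mathcal{N}(0,\ 0.01^2)$$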
In [17]:
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
Reading the Dataset
In [18]:
import torch.utils.data as Data
batch_size = 10
# combine the features and labels of the dataset
dataset = Data.TensorDataset(features, labels)
# put the dataset into a DataLoader
data_iter = Data.DataLoader(
    dataset=dataset,        # torch TensorDataset format
    batch_size=batch_size,  # mini-batch size
    shuffle=True,           # whether to shuffle the data
    num_workers=2,          # read data with multiple worker processes
)
In [19]:
for X, y in data_iter:
    print(X, '\n', y)
    break
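Each iteration yields one mini-batch whose shapes follow from the dataset construction above. A quick sanity check (a minimal sketch, not part of the original notebook; note that on platforms where worker processes are problematic, such as some Windows setups, num_workers=0 is a safe fallback):

X, y = next(iter(data_iter))
assert X.shape == (batch_size, num_inputs)  # features: (10, 2)
assert y.shape == (batch_size,)             # labels: (10,)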
Defining the Model
In [20]:
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()  # call the parent constructor
        self.linear = nn.Linear(n_feature, 1)  # function prototype: torch.nn.Linear(in_features, out_features, bias=True)

    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
print(net)
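Calling the module directly invokes forward through nn.Module.__call__, so net(X) is the idiomatic way to compute predictions. A minimal sketch (the sample batch here is made up for illustration):

X_sample = torch.randn(5, num_inputs)  # hypothetical batch of 5 examples
y_hat = net(X_sample)                  # runs LinearNet.forward (plus any hooks)
print(y_hat.shape)                     # torch.Size([5, 1])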
In [21]:
# ways to init a multilayer network
# method one
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # other layers can be added here
)
# method two
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module ...

# method three
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
    # ...
]))
print(net)
print(net[0])
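Note that integer indexing such as net[0] works only because nn.Sequential implements it; the custom LinearNet defined earlier exposes its layer as an attribute instead. A small sketch:

custom_net = LinearNet(num_inputs)
print(custom_net.linear.weight.shape)  # access by attribute: torch.Size([1, 2])
print(net[0].weight.shape)             # access by index: torch.Size([1, 2])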
Initializing Model Parameters
In [22]:
from torch.nn import init
init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # or you can use net[0].bias.data.fill_(0) to modify it directly
In [23]:
for param in net.parameters():
print(param)
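net.parameters() yields the tensors only; named_parameters() additionally reports each parameter's name, which is handy for debugging. A minimal sketch:

for name, param in net.named_parameters():
    print(name, param.shape)
# linear.weight torch.Size([1, 2])
# linear.bias torch.Size([1])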
Defining the Loss Function
In [24]:
loss = nn.MSELoss() # nn built-in squared loss function
# function prototype: torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')
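With the default reduction='mean', nn.MSELoss computes the average squared error over the batch:

$$\ell(\hat{\mathbf{y}}, \mathbf{y}) = \frac{1}{n}\sum_{i=1}^{n}\left(\hat{y}^{(i)} - y^{(i)}\right)^2$$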
Defining the Optimization Function
In [25]:
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)  # built-in stochastic gradient descent
print(optimizer) # function prototype: torch.optim.SGD(params, lr=, momentum=0, dampening=0, weight_decay=0, nesterov=False)
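The learning rate is stored per parameter group, so it can also be changed after construction. A sketch of decaying it (shown commented out so the training below still uses lr=0.03; the factor 0.1 is arbitrary):

# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1  # decay the learning rate to a tenth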
Training
In [26]:
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # reset gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))
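The loss printed above is that of the last mini-batch only; evaluating on the full dataset gives a steadier number. A minimal sketch using the tensors defined earlier:

with torch.no_grad():  # no gradients needed for evaluation
    full_loss = loss(net(features), labels.view(-1, 1))
print('full-dataset loss: %f' % full_loss.item())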
In [27]:
# result comparison
dense = net[0]
print(true_w, dense.weight.data)
print(true_b, dense.bias.data)