Dive into Deep Learning (I. Deep Learning Basics: Linear Regression)
Generating the dataset
Reading the data
Initializing model parameters
Defining the model
Defining the loss function
Defining the optimization algorithm
Training the model
I. Linear Regression from Scratch
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
# Features drawn from a standard normal distribution
features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
# Labels follow the true linear model, plus Gaussian noise with std 0.01
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)
print(features[0], labels[0])
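In formula form, each label is generated from the true linear model plus a small noise term (a restatement of the code above):

$$y = 2x_1 - 3.4x_2 + 4.2 + \epsilon, \qquad \epsilon \sim \mathcal{N}(0,\ 0.01^2)$$

where the noise $\epsilon$ has mean 0 and standard deviation 0.01.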
#=========================== matplotlib plotting helpers
def use_svg_display():
    # Render figures as SVG for a crisper display
    # (set_matplotlib_formats has since been deprecated in IPython in favor of
    # matplotlib_inline.backend_inline.set_matplotlib_formats)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize

set_figsize()
plt.scatter(features[:, 1].numpy(), labels.numpy(), 1);
#==================================================
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # read the samples in random order
    for i in range(0, num_examples, batch_size):
        # the last batch may contain fewer than batch_size examples
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
    print(X, y)
    break
# Initialize the model parameters: weights from a normal with std 0.01, bias zero
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
print(w)
print(b)
# Enable gradient tracking so autograd can compute gradients for w and b
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# Define the model: the linear regression prediction Xw + b
def linreg(X, w, b):
    return torch.mm(X, w) + b
# Define the loss function: squared loss
def squared_loss(y_hat, y):
    # reshape y to y_hat's shape so the subtraction lines up elementwise
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
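For reference, squared_loss implements the standard per-example squared loss; the factor of 1/2 just makes the gradient cleaner:

$$\ell^{(i)}(w, b) = \frac{1}{2}\left(\hat{y}^{(i)} - y^{(i)}\right)^2$$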
# Define the optimization algorithm: mini-batch stochastic gradient descent
def sgd(params, lr, batch_size):
    for param in params:
        # update .data directly so the step itself is not recorded by autograd;
        # divide by batch_size because the loss was summed over the batch
        param.data -= lr * param.grad / batch_size
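The sgd function implements the mini-batch stochastic gradient descent update. Since the training loop below sums the loss over the mini-batch $\mathcal{B}$ before calling backward, dividing by the batch size turns the summed gradient into an average:

$$(w, b) \leftarrow (w, b) - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_{(w, b)}\, \ell^{(i)}(w, b)$$

where $\eta$ is the learning rate.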
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # sum the losses of the mini-batch
        l.backward()                     # compute gradients w.r.t. w and b
        sgd([w, b], lr, batch_size)      # update the parameters
        # zero the gradients, otherwise they would accumulate across batches
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
print(true_w, '\n', w)
print(true_b, '\n', b)
Output:
tensor([ 1.7489, -1.3365]) tensor(12.2518)
tensor([[-0.7026, -0.9066],
        [-0.5462,  0.7684],
        [ 0.1347, -1.1432],
        [-0.9276,  0.2061],
        [-0.2879,  0.8351],
        [-0.1709, -0.2726],
        [ 0.3896,  0.6176],
        [ 0.9559, -0.9961],
        [ 0.3178, -0.7674],
        [ 0.1679, -0.2784]]) tensor([5.8999, 0.4829, 8.3593, 1.6556, 0.7915, 4.7864, 2.8755, 9.5106, 7.4385, 5.4885])
tensor([[-0.0004],
        [ 0.0188]])
tensor([0.])
epoch 1, loss 0.039256
epoch 2, loss 0.000147
epoch 3, loss 0.000051
[2, -3.4]
 tensor([[ 1.9998],
        [-3.3993]], requires_grad=True)
4.2
 tensor([4.1995], requires_grad=True)
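Once training has converged, the learned w and b can be used directly for prediction. A minimal sketch (the input values below are illustrative, not from the original):

# predict for a new example with the trained parameters;
# no_grad() skips autograd tracking since we are not training here
with torch.no_grad():
    X_new = torch.tensor([[1.0, 2.0]])  # hypothetical new example
    print(linreg(X_new, w, b))  # expect about 2*1 - 3.4*2 + 4.2 = -0.6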
II. Concise Implementation of Linear Regression
import torch
from matplotlib import pyplot as plt
import numpy as np
import random
# 1. Generate the dataset
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
# Features drawn from a standard normal distribution
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
print(features[0], labels[0])
# 2. Read the data
import torch.utils.data as Data
batch_size = 10
# Combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# Read random mini-batches
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
for X, y in data_iter:
    print(X)
    print(y)
    break
# 3. Define the model
import torch.nn as nn

class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_feature, 1)

    # forward defines the forward pass
    def forward(self, x):
        y = self.linear(x)
        return y

net = LinearNet(num_inputs)
print(net)
for param in net.parameters():
    print(param)
# 4. Initialize the model parameters: the weights and bias of the model
from torch.nn import init
# Initialize the model's weights and bias
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
# Note: net[0].weight must be written as net.linear.weight here, because
# indexing submodules by subscript like net[0] only works when net is a
# ModuleList or Sequential instance (see the sketch after this block)
for param in net.parameters():
    print(param)
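For comparison, here is a minimal sketch of the same model built with nn.Sequential (seq_net is a name introduced here for illustration); with a Sequential container, the net[0] subscript access mentioned above does work:

# the same single-layer model as a Sequential container
seq_net = nn.Sequential(nn.Linear(num_inputs, 1))
init.normal_(seq_net[0].weight, mean=0, std=0.01)  # subscript access works for Sequential
init.constant_(seq_net[0].bias, val=0)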
# 5. Define the loss function
# nn.MSELoss averages over the batch by default (reduction='mean'),
# so no manual division by the batch size is needed
loss = nn.MSELoss()
# 6. Define the optimization algorithm
# (rebuilding a new optimizer mid-training discards optimizer state such as
# momentum, which can cause the loss to oscillate as it converges)
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)
# Different learning rates can be set for different sub-networks, which is
# common when fine-tuning (see the sketch after this block)
# Adjusting the learning rate of existing parameter groups in place:
print("*" * 20)
print(len(optimizer.param_groups))
for param_group in optimizer.param_groups:
    param_group['lr'] *= 1  # a factor of 1 leaves lr unchanged; use e.g. 0.1 to decay
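As a sketch of the per-subnetwork learning rates mentioned above: optim.SGD also accepts a list of parameter groups, each with its own lr. Only net.linear exists in this model, so the second group is shown commented out as a hypothetical; optimizer_ft is a new name and is not used by the training below:

# pass parameter groups instead of a flat parameter list
optimizer_ft = optim.SGD([
    {'params': net.linear.parameters(), 'lr': 0.03},
    # {'params': net.other.parameters(), 'lr': 0.003},  # hypothetical second subnetwork
], lr=0.03)  # the lr given here is the default for groups that omit it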
# 7. Train the model
# We iterate the model parameters by calling the step function of the optim
# instance. Unlike the from-scratch version, there is no division by the batch
# size here: nn.MSELoss already averages the loss over the samples in the batch.
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))  # reshape y to match output's shape
        optimizer.zero_grad()  # zero the gradients, equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    # note: l is the loss of the last mini-batch, not an epoch average
    print('epoch %d, loss: %f' % (epoch, l.item()))
# Fetch the layer we need from net and access its weight and bias
dense = net.linear
print(true_w, dense.weight)
print(true_b, dense.bias)
# PyTorch lets us implement the model much more concisely:
# torch.utils.data provides tools for data processing
# torch.nn defines a large number of neural-network layers
# torch.nn.init defines various initialization methods
# torch.optim provides many commonly used optimization algorithms
Output:
tensor([0.8380, 1.8332]) tensor(-0.3578)
tensor([[-0.5930, -1.0323],
        [ 0.4803, -0.6949],
        [-1.8518, -0.3402],
        [ 2.1459, -0.9358],
        [ 0.0538,  0.1641],
        [-0.2422,  2.7112],
        [-1.4170, -0.2609],
        [ 1.1335,  1.3915],
        [-0.9495,  0.0463],
        [ 0.7052, -0.1217]])
tensor([ 6.5231,  7.5113,  1.6531, 11.6631,  3.7448, -5.5022,  2.2547,  1.7412,  2.1505,  6.0146])
LinearNet(
  (linear): Linear(in_features=2, out_features=1, bias=True)
)
Parameter containing:
tensor([[-0.0726,  0.3277]], requires_grad=True)
Parameter containing:
tensor([-0.6877], requires_grad=True)
Parameter containing:
tensor([[-0.0027, -0.0005]], requires_grad=True)
Parameter containing:
tensor([0.], requires_grad=True)
SGD (
Parameter Group 0
    dampening: 0
    lr: 0.03
    momentum: 0
    nesterov: False
    weight_decay: 0
)
********************
1
epoch 1, loss: 0.000124
epoch 2, loss: 0.000123
epoch 3, loss: 0.000023
[2, -3.4] Parameter containing:
tensor([[ 1.9998, -3.3999]], requires_grad=True)
4.2 Parameter containing:
tensor([4.2010], requires_grad=True)