import torch
from torch import nn
from d2l import torch as d2l  # needed by the train_concise example in section 3
This post summarizes the parameter-initialization methods that appear in the code provided by 李沐. To keep each example self-contained, the code blocks are copied as whole chunks.
1. torch.nn.init.normal_(tensor, mean=0.0, std=1.0)
Fills tensor in place with values drawn from the normal distribution N(mean, std^2).
def init_weights(m):  # initialize the model's weights
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)  # mean defaults to 0

net.apply(init_weights)  # apply init_weights to every submodule of net
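As a quick check that the initializer took effect (a minimal sketch; the small MLP below is my stand-in for net, not the original model):

net = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))
net.apply(init_weights)            # walks the module tree, calling init_weights on each submodule
print(net[0].weight.std().item())  # should print roughly 0.01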
2. nn.Parameter()
In this from-scratch setup the wrapper is mostly for clarity: it marks these tensors as trainable parameters (and would register them automatically if they were attributes of an nn.Module). See the caveat in the comment below.
num_inputs, num_outputs, num_hiddens = 784, 10, 256

W1 = nn.Parameter(torch.randn(
    num_inputs, num_hiddens, requires_grad=True) * 0.01)
# Roughly W1 = torch.randn(num_inputs, num_hiddens, requires_grad=True) * 0.01,
# except that the product on its own is a non-leaf tensor whose .grad is not
# populated during backward; nn.Parameter re-wraps the result as a leaf tensor.
b1 = nn.Parameter(torch.zeros(num_hiddens, requires_grad=True))
W2 = nn.Parameter(torch.randn(
    num_hiddens, num_outputs, requires_grad=True) * 0.01)
b2 = nn.Parameter(torch.zeros(num_outputs, requires_grad=True))
params = [W1, b1, W2, b2]
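The explicit list can be used in a manual forward pass and handed straight to an optimizer (a minimal sketch; the dummy batch and the SGD call are my additions, not part of the original code):

X = torch.randn(2, num_inputs)             # dummy batch of 2 flattened inputs
H = torch.relu(X @ W1 + b1)                # hidden layer
O = H @ W2 + b2                            # outputs, shape (2, num_outputs)
updater = torch.optim.SGD(params, lr=0.1)  # the explicit parameter list plugs into optimizers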
3. param.data.normal_(mean=0, std=1)
def train_concise(wd):
    # train_iter, test_iter and num_inputs come from the chapter's data setup
    net = nn.Sequential(nn.Linear(num_inputs, 1))
    for param in net.parameters():
        param.data.normal_()  # in-place init; defaults are mean=0, std=1
    loss = nn.MSELoss(reduction='none')
    num_epochs, lr = 100, 0.003
    # The bias parameter gets no weight decay
    trainer = torch.optim.SGD([
        {"params": net[0].weight, 'weight_decay': wd},
        {"params": net[0].bias}], lr=lr)
    animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log',
                            xlim=[5, num_epochs], legend=['train', 'test'])
    for epoch in range(num_epochs):
        for X, y in train_iter:
            trainer.zero_grad()
            l = loss(net(X), y)
            l.mean().backward()
            trainer.step()
        if (epoch + 1) % 5 == 0:
            animator.add(epoch + 1,
                         (d2l.evaluate_loss(net, train_iter, loss),
                          d2l.evaluate_loss(net, test_iter, loss)))
    print('L2 norm of w:', net[0].weight.norm().item())
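Why go through .data? Writing through .data changes the values in place without the operation being recorded by autograd, which is exactly what we want for initialization. A tiny standalone check (the 4-by-2 layer is a hypothetical example):

layer = nn.Linear(4, 2)            # hypothetical layer, just for the demo
layer.weight.data.normal_()        # refill weights from N(0, 1), invisible to autograd
print(layer.weight.requires_grad)  # True: writing via .data does not detach the Parameter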
The same pattern also works layer by layer, with .fill_ handling the bias:

net = nn.Sequential(nn.Linear(2, 1))
net[0].weight.data.normal_(0, 0.01)  # weights ~ N(0, 0.01^2)
net[0].bias.data.fill_(0)            # bias set to 0