The main functions used in this program are summarized below:
# 1 Accessing model parameters
for name, param in net.named_parameters():
for param in net.parameters():
self.weight1 = nn.Parameter(torch.rand(20,20))
# 2 Initializing model parameters
init.normal_(param, mean=0, std=0.01)
init.constant_(param, val=0)
if 'weight' in name:
    init.normal_(param, mean=0, std=0.01)
if 'bias' in name:
    init.constant_(param, val=0)
# 3 Custom initialization methods
def normal_(tensor, mean=0, std=1):
    with torch.no_grad():
        return tensor.normal_(mean, std)
# 4 Sharing model parameters
print(id(net[0]) == id(net[1]))
print(id(net[0].weight) == id(net[1].weight))
Access, Initialization, and Sharing of Model Parameters
import torch
from torch import nn
from torch.nn import init
net = nn.Sequential(nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 1))  # PyTorch applies default initialization
print(net)
X = torch.rand(2, 4)
Y = net(X).sum()
Sequential(
(0): Linear(in_features=4, out_features=3, bias=True)
(1): ReLU()
(2): Linear(in_features=3, out_features=1, bias=True)
)
1 Accessing Model Parameters
print(type(net.named_parameters()))  # the return type is a generator (an iterator)
for name, param in net.named_parameters():
    print(name, param.size())
<class 'generator'>
0.weight torch.Size([3, 4])
0.bias torch.Size([3])
2.weight torch.Size([1, 3])
2.bias torch.Size([1])
for name, param in net[0].named_parameters():
    print(name, param.size(), type(param))
weight torch.Size([3, 4]) <class 'torch.nn.parameter.Parameter'>
bias torch.Size([3]) <class 'torch.nn.parameter.Parameter'>
# The returned param has type torch.nn.parameter.Parameter, a subclass of Tensor; if a Tensor is a Parameter, it is automatically added to the model's parameter list (a plain Tensor, such as weight2 below, is not)
class MyModule(nn.Module):
    def __init__(self, **kwargs):
        super(MyModule, self).__init__(**kwargs)
        self.weight1 = nn.Parameter(torch.rand(20, 20))  # registered as a model parameter
        self.weight2 = torch.rand(20, 20)                # a plain Tensor, not registered
    def forward(self, x):
        pass
n = MyModule()
for name, param in n.named_parameters():
    print(name, param.size())
weight1 torch.Size([20, 20])
weight_0 = list(net[0].parameters())[0]
print(weight_0.data)
print(weight_0.grad)
Y.backward()
print(weight_0.grad)
tensor([[-0.2172, -0.3502, -0.3399, 0.0253],
[ 0.4869, 0.2350, 0.1072, 0.4208],
[-0.1217, -0.3052, -0.0451, 0.4937]])
None
tensor([[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.3976, 0.0865, 0.2156, 0.3260],
[-0.2608, -0.0370, -0.1443, -0.2987]])
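Note that .grad accumulates across backward calls. A minimal sketch of clearing it before a second pass (Y2 is an illustrative name; in recent PyTorch versions zero_grad() sets .grad to None instead of zeros):
net.zero_grad()        # reset accumulated gradients
Y2 = net(X).sum()
Y2.backward()
print(weight_0.grad)   # gradient from this pass only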
2 Initializing Model Parameters
for name, param in net.named_parameters():
    if 'weight' in name:
        init.normal_(param, mean=0, std=0.01)
        print(name, param.data)
0.weight tensor([[-1.9497e-03, 1.7389e-02, -1.0310e-02, -3.6656e-03],
[-7.7060e-04, 1.8553e-02, -4.4594e-05, 7.6487e-03],
[-1.1192e-02, -7.5417e-03, -8.0898e-06, -8.7740e-03]])
2.weight tensor([[-0.0004, -0.0096, -0.0151]])
for name, param in net.named_parameters():
    if 'bias' in name:
        init.constant_(param, val=0)
        print(name, param.data)
0.bias tensor([0., 0., 0.])
2.bias tensor([0.])
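torch.nn.init also ships other built-in schemes such as init.xavier_uniform_. A hedged sketch of applying them per layer type via Module.apply instead of matching parameter names (init_linear is an illustrative helper, not from the original):
def init_linear(m):
    # initialize every nn.Linear submodule: Xavier-uniform weights, zero bias
    if isinstance(m, nn.Linear):
        init.xavier_uniform_(m.weight)
        if m.bias is not None:
            init.constant_(m.bias, val=0)
net.apply(init_linear)  # apply() visits every submodule recursively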
3 Custom Initialization Methods
# How PyTorch implements an in-place initialization method
def normal_(tensor, mean=0, std=1):
    with torch.no_grad():
        return tensor.normal_(mean, std)
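A quick check of this helper (w is an illustrative tensor, not from the original):
w = torch.empty(3, 4)
normal_(w, mean=0, std=0.01)  # fills w in place without tracking gradients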
# Implement a custom initialization method: each weight has a one-half probability of being initialized to 0, and otherwise is a uniformly distributed random number from the two intervals [−10, −5] and [5, 10]
def init_weight(tensor):
    with torch.no_grad():
        tensor.uniform_(-10, 10)               # draw uniformly from [-10, 10]
        tensor *= (tensor.abs() >= 5).float()  # zero out entries with |x| < 5
for name, param in net.named_parameters():
    if 'weight' in name:
        init_weight(param)
        print(name, param)
0.weight Parameter containing:
tensor([[ 8.5147, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.0000, -0.0000],
[-0.0000, -6.8760, 7.5300, -0.0000]], requires_grad=True)
2.weight Parameter containing:
tensor([[-0., 0., 0.]], requires_grad=True)
# You can also rewrite parameter values by modifying their .data, which leaves the gradient untouched:
for name, param in net.named_parameters():
    if 'bias' in name:
        param.data += 1
        print(name, param.data)
0.bias tensor([5., 5., 5.])
2.bias tensor([5.])
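Why go through .data? Autograd rejects an in-place update of a leaf Parameter that requires grad; an equivalent idiom wraps the update in torch.no_grad() (a sketch, not from the original):
for name, param in net.named_parameters():
    if 'bias' in name:
        with torch.no_grad():
            param += 1  # same effect as param.data += 1; .grad is untouched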
4 Sharing Model Parameters
# Calling the same layer multiple times in a Module's forward function shares its parameters; likewise, modules passed to Sequential share parameters if they are the same Module instance. The Sequential case is demonstrated here; the forward-reuse case is sketched at the end of this section.
linear = nn.Linear(1, 1, bias = False)
net = nn.Sequential(linear, linear)
print(net)
for name, param in net.named_parameters():
    init.constant_(param, val=3)
    print(name, param.data)
Sequential(
(0): Linear(in_features=1, out_features=1, bias=False)
(1): Linear(in_features=1, out_features=1, bias=False)
)
0.weight tensor([[3.]])
# In memory, the two linear layers are one and the same object:
print(id(net[0]) == id(net[1]))
print(id(net[0].weight) == id(net[1].weight))
True
True
x = torch.ones(1,1)
y = net(x).sum()
print(y)
y.backward()
print(net[0].weight.grad)  # the shared weight's gradient accumulates from both uses: 3 + 3 = 6
print(net[1].weight.grad)  # same Parameter object, so the same gradient
tensor(9., grad_fn=<SumBackward0>)
tensor([[6.]])
tensor([[6.]])
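For completeness, a minimal sketch of the other sharing route mentioned above, reusing one layer inside forward (SharedNet is an illustrative name, not from the original):
class SharedNet(nn.Module):
    def __init__(self):
        super(SharedNet, self).__init__()
        self.linear = nn.Linear(1, 1, bias=False)
    def forward(self, x):
        # the same layer, hence the same weight, is applied twice
        return self.linear(self.linear(x))
m = SharedNet()
init.constant_(m.linear.weight, val=3)
print(m(torch.ones(1, 1)).sum())  # tensor(9., grad_fn=<SumBackward0>), matching the Sequential version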