import torch
from torch import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
print(net(X))
# Inspect the parameters of the second fully connected layer.
print(net[2].state_dict())
print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)
print(net[2].weight.grad is None)  # True: no backward pass has run yet
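# The grad attribute stays None until a backward pass runs. A minimal sketch,
# assuming a scalar loss built with .sum() (purely illustrative):
net(X).sum().backward()
print(net[2].weight.grad is not None)  # True after backward
net.zero_grad()  # clear the gradients so the examples below start fresh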
# Access all parameters at once: compare accessing the parameters of the first
# fully connected layer with accessing those of all layers.
print(*[(name, param.shape) for name, param in net[0].named_parameters()])
print(*[(name, param.shape) for name, param in net.named_parameters()])
print(net.state_dict()['2.bias'].data)
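# state_dict is also what gets serialized when saving a model. A minimal
# sketch (the file name 'mlp.params' is arbitrary, chosen for illustration):
torch.save(net.state_dict(), 'mlp.params')
clone = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
clone.load_state_dict(torch.load('mlp.params'))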
# Collecting parameters from nested blocks
def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        # Nest block1 here
        net.add_module(f'block {i}', block1())
    return net
rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
print(rgnet(X))
print(rgnet)
print(rgnet[0][1][0].bias.data)
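# Because the network is nested, parameter names are composed hierarchically;
# listing them shows how the 'block i' names appear in the qualified paths:
for name, param in rgnet.named_parameters():
    print(name, param.shape)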
# Parameter initialization
# Built-in initialization: initialize the weights as Gaussian random variables
# with standard deviation 0.01, and set the bias parameters to zero.
def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
print(net[0].weight.data[0], net[0].bias.data[0])
# Initialize all parameters to a given constant, e.g. 1.
def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
print(net[0].weight.data[0], net[0].bias.data[0])
# Below we initialize the first layer with the Xavier initializer, and
# initialize the third layer to the constant value 42.
def xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)
net[0].apply(xavier)
net[2].apply(init_42)
print(net[0].weight.data[0])
print(net[2].weight.data)
# Custom initialization
def my_init(m):
    if type(m) == nn.Linear:
        print("Init", *[(name, param.shape)
                        for name, param in m.named_parameters()][0])
        nn.init.uniform_(m.weight, -10, 10)
        # Keep only weights with magnitude >= 5; zero out the rest
        m.weight.data *= m.weight.data.abs() >= 5

net.apply(my_init)
print(net[0].weight[:2])
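# On a U(-10, 10) draw, roughly half the entries clear the >= 5 cutoff in
# expectation; a quick, purely illustrative check of the surviving fraction:
print((net[0].weight.data != 0).float().mean())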
# Setting parameters directly
net[0].weight.data[:] += 1
net[0].weight.data[0, 0] = 42
print(net[0].weight.data[0])
# Sometimes we want to share parameters across multiple layers: we can define
# a dense layer and then use its parameters to set those of another layer.
# We give the shared layer a name so that we can refer to its parameters.
shared = nn.Linear(8, 8)
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared, nn.ReLU(), nn.Linear(8, 1))
net(X)
# Check whether the parameters are the same
print(net[2].weight.data[0] == net[4].weight.data[0])
net[2].weight.data[0, 0] = 100
# Make sure they are actually the same object, not just equal in value
print(net[2].weight.data[0] == net[4].weight.data[0])
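# Since the two slots hold the same Parameter object, their gradients are the
# same tensor as well, so gradients from both uses accumulate in one place.
# A small illustrative check, again using a .sum() scalar loss:
net(X).sum().backward()
print(net[2].weight.grad is net[4].weight.grad)  # True: one shared gradient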
# Custom layers
# A layer without parameters
import torch.nn.functional as F
class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        # Subtract the mean so the output is centered at zero
        return X - X.mean()
layer = CenteredLayer()
print(layer(torch.tensor([1.0, 2, 3, 4, 5])))
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
Y = net(torch.rand(4, 8))
print(Y.mean())
# A layer with parameters
class MyLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        # Use the parameters directly (not .data) so autograd can track them
        linear = torch.matmul(X, self.weight) + self.bias
        return F.relu(linear)
linear = MyLinear(5, 3)
print(linear.weight)
print(linear(torch.rand(2, 5)))
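# Like built-in layers, custom layers compose inside nn.Sequential; a short
# sketch (the sizes 64 and 8 are arbitrary, chosen for illustration):
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
print(net(torch.rand(2, 64)))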
# Sample outputs from the session above (values vary with the random seed):
# - net(X) is a (2, 1) tensor; net[2].state_dict() holds a (1, 8) weight and
#   a 1-element bias, and net[2].bias is a torch.nn.parameter.Parameter.
# - named_parameters on net[0] yields ('weight', [8, 4]) and ('bias', [8]);
#   on the whole net it yields '0.weight', '0.bias', '2.weight', '2.bias'.
# - rgnet prints as a nested Sequential: four 'block i' sub-blocks, each
#   Linear(4, 8) -> ReLU -> Linear(8, 4) -> ReLU, followed by Linear(4, 1).
# - my_init reports "Init weight torch.Size([8, 4])" and
#   "Init weight torch.Size([1, 8])"; the surviving weights are either 0 or
#   have magnitude >= 5.
# - The shared-layer equality checks print all-True both times.
# - CenteredLayer maps [1, 2, 3, 4, 5] to [-2, -1, 0, 1, 2], and Y.mean() is
#   zero up to floating-point error (~1e-9).