Basic Neural Network Operations
Layers and Blocks
import torch
from torch import nn
from torch.nn import functional as F
net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = torch.rand(2, 20)
net(X)
nn.Sequential defines a special kind of Module.
tensor([[ 0.1161, -0.1570,  0.0382,  0.1133,  0.0828, -0.1615,  0.2370, -0.1944, -0.2155, -0.0010],
        [-0.0329, -0.1095,  0.1069,  0.1126, -0.0931, -0.2036,  0.2056, -0.1133, -0.1490, -0.0717]], grad_fn=<AddmmBackward0>)
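Since nn.Sequential is itself a Module, PyTorch also accepts an OrderedDict when you want named layers. A minimal sketch (the names fc1/act/fc2 are our own):
from collections import OrderedDict

named_net = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(20, 256)),
    ('act', nn.ReLU()),
    ('fc2', nn.Linear(256, 10)),
]))
named_net.fc1  # layers can now be accessed by name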
Custom Blocks
class MLP(nn.Module):
    # Declare the layers with model parameters. Here we declare two
    # fully connected layers.
    def __init__(self):
        # Call the constructor of the parent class Module to perform the
        # necessary initialization. This way, other function arguments can
        # also be specified at instantiation time, such as the model
        # parameters params (introduced later).
        super().__init__()
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)  # output layer

    # Define the forward propagation of the model, i.e., how to return the
    # required model output based on the input X.
    def forward(self, X):
        # Note that we use the functional version of ReLU here, defined in
        # the nn.functional module.
        return self.out(F.relu(self.hidden(X)))
MLP inherits from nn.Module.
net = MLP()
net(X)
tensor([[-0.1477, -0.0113, -0.3262, -0.3437, -0.1782,  0.0064,  0.1695, -0.0377, -0.1234, -0.1726],
        [-0.1396, -0.0354, -0.2221, -0.2403, -0.2171, -0.0176,  0.1111, -0.0583, -0.1728, -0.2700]], grad_fn=<AddmmBackward0>)
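A useful property of inheriting from nn.Module: any layer assigned to self in __init__ is registered automatically, so its parameters are visible to the parent module. A quick check (output shown as a comment):
print([name for name, _ in net.named_parameters()])
# ['hidden.weight', 'hidden.bias', 'out.weight', 'out.bias']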
The Sequential Block
Let's define a class of our own that works much like Sequential.
class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        for idx, module in enumerate(args):
            # Here, module is an instance of a Module subclass. We save it
            # in the member variable _modules of the Module class;
            # _modules is an OrderedDict.
            self._modules[str(idx)] = module

    def forward(self, X):
        # The OrderedDict guarantees that members are traversed in the
        # order they were added.
        for block in self._modules.values():
            X = block(X)
        return X
MySequential also inherits from nn.Module. enumerate(args) yields each index together with the corresponding object; a tiny demo follows.
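Nothing beyond the imports above is needed:
for idx, module in enumerate((nn.Linear(20, 256), nn.ReLU())):
    print(idx, type(module).__name__)
# 0 Linear
# 1 ReLU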
net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
tensor([[ 0.1401, -0.4604,  0.1438, -0.1163, -0.0369,  0.0700,  0.1357, -0.0974,  0.0121, -0.1449],
        [ 0.1717, -0.4772,  0.0640, -0.0728, -0.0654,  0.1146,  0.1494, -0.0043, -0.1300, -0.0491]], grad_fn=<AddmmBackward0>)
Executing Code in the Forward Propagation Function
class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # Random weights that do not compute gradients, and therefore stay
        # constant during training.
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)
        # Use the constant parameters created above, plus the relu and mm
        # functions.
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the fully connected layer. This is equivalent to two fully
        # connected layers sharing parameters.
        X = self.linear(X)
        # Control flow
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()
net = FixedHiddenMLP()
net(X)
tensor(0.1599, grad_fn=<SumBackward0>)
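Because rand_weight is a plain tensor with requires_grad=False (not an nn.Parameter), it is not registered as a parameter; only the single reused Linear layer contributes parameters. A quick check:
print(len(list(net.parameters())))  # 2: the Linear's weight and bias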
We can even nest blocks like Russian dolls:
class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))
chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X)
tensor(-0.2037, grad_fn=<SumBackward0>)
Parameter Management
import torch
from torch import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)
tensor([[0.2372],
[0.4121]], grad_fn=<AddmmBackward0>)
Parameter Access
print(net[2].state_dict())
state_dict(): used to inspect the parameters.
OrderedDict([('weight', tensor([[ 0.2317, 0.3447, 0.2471, -0.2507, -0.0154, -0.0791, 0.1523, -0.1643]])), ('bias', tensor([0.2571]))])
print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)
<class 'torch.nn.parameter.Parameter'>
Parameter containing:
tensor([0.2571], requires_grad=True)
tensor([0.2571])
net[2].bias has two notable attributes: .data (the value) and requires_grad (whether it takes part in gradient descent), as shown below.
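b = net[2].bias
print(b.data)           # the underlying value tensor
print(b.requires_grad)  # True: this parameter is updated by training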
net[2].weight.grad == None
True
print(*[(name, param.shape) for name, param in net[0].named_parameters()])
print(*[(name, param.shape) for name, param in net.named_parameters()])
Inspect the parameters of net[0].
Inspect the parameters of the whole net.
('weight', torch.Size([8, 4])) ('bias', torch.Size([8]))
('0.weight', torch.Size([8, 4])) ('0.bias', torch.Size([8])) ('2.weight', torch.Size([1, 8])) ('2.bias', torch.Size([1]))
Another way to access parameters:
net.state_dict()['2.bias'].data
tensor([0.2571])
Accessing Parameters in Nested Blocks
Define two blocks:
def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(),
                         nn.Linear(8, 4), nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        # Nesting happens here
        net.add_module(f'block {i}', block1())
    return net
rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)
add_module differs from placing layers directly in nn.Sequential() in that it lets you attach a string name to each block; a small sketch follows after the output below.
tensor([[0.3181],
[0.3181]], grad_fn=<AddmmBackward0>)
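A minimal sketch of the naming difference (the names hidden/act are our own):
# Positional arguments get automatic numeric names; add_module lets us
# choose the names ourselves.
plain = nn.Sequential(nn.Linear(4, 8), nn.ReLU())
print([name for name, _ in plain.named_children()])  # ['0', '1']

named = nn.Sequential()
named.add_module('hidden', nn.Linear(4, 8))
named.add_module('act', nn.ReLU())
print([name for name, _ in named.named_children()])  # ['hidden', 'act']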
Inspect the structure of the blocks:
print(rgnet)
Sequential(
(0): Sequential(
(block 0): Sequential(
(0): Linear(in_features=4, out_features=8, bias=True)
(1): ReLU()
(2): Linear(in_features=8, out_features=4, bias=True)
(3): ReLU()
)
(block 1): Sequential(
(0): Linear(in_features=4, out_features=8, bias=True)
(1): ReLU()
(2): Linear(in_features=8, out_features=4, bias=True)
(3): ReLU()
)
(block 2): Sequential(
(0): Linear(in_features=4, out_features=8, bias=True)
(1): ReLU()
(2): Linear(in_features=8, out_features=4, bias=True)
(3): ReLU()
)
(block 3): Sequential(
(0): Linear(in_features=4, out_features=8, bias=True)
(1): ReLU()
(2): Linear(in_features=8, out_features=4, bias=True)
(3): ReLU()
)
)
(1): Linear(in_features=4, out_features=1, bias=True)
)
Inspect a parameter:
rgnet[0][1][0].bias.data
tensor([ 0.4537, -0.0671, -0.2635, -0.3841, -0.4680, -0.0306, 0.3772, -0.3542])
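Since parameter names mirror the nesting, we can also walk the whole hierarchy at once; a quick sketch:
for name, param in rgnet.named_parameters():
    print(name, param.shape)
# e.g. 0.block 0.0.weight torch.Size([8, 4]) ...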
Parameter Initialization
def init_normal(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)
net[0].weight.data[0], net[0].bias.data[0]
net.apply applies the initialization function to every layer of the network. Methods whose names end in "_", such as normal_, modify their argument (here m.weight) in place rather than returning a fresh copy; a small demo follows after the output below.
(tensor([ 0.0094, -0.0039, -0.0045, 0.0260]), tensor(0.))
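A tiny demo of the in-place convention:
t = torch.empty(3)
nn.init.constant_(t, 7.0)  # fills t in place (and returns the same tensor)
print(t)  # tensor([7., 7., 7.])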
def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
net[0].weight.data[0], net[0].bias.data[0]
(tensor([1., 1., 1., 1.]), tensor(0.))
def xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(xavier)
net[2].apply(init_42)
print(net[0].weight.data[0])
print(net[2].weight.data)
Xavier initialization picks the weight scale so that the variance of the outputs matches that of the inputs; a sanity check follows after the output below.
tensor([-0.1767, -0.0299, -0.1269, 0.6998])
tensor([[42., 42., 42., 42., 42., 42., 42., 42.]])
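xavier_uniform_ draws from U(-a, a) with a = sqrt(6 / (fan_in + fan_out)), so no entry should exceed that bound:
import math

w = net[0].weight.data
a = math.sqrt(6.0 / (w.shape[0] + w.shape[1]))
print(w.abs().max().item() <= a)  # True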
Custom Initialization
def my_init(m):
    if type(m) == nn.Linear:
        print("Init", *[(name, param.shape)
                        for name, param in m.named_parameters()][0])
        nn.init.uniform_(m.weight, -10, 10)
        # Keep only entries with magnitude >= 5; zero out the rest.
        m.weight.data *= m.weight.data.abs() >= 5

net.apply(my_init)
net[0].weight[:2]
Init weight torch.Size([8, 4])
Init weight torch.Size([1, 8])
tensor([[ 0.0000, -0.0000, 7.7878, -7.3791],
[-0.0000, 6.9575, -7.4805, -0.0000]], grad_fn=<SliceBackward0>)
We can also set parameters directly:
net[0].weight.data[:] += 1
net[0].weight.data[0, 0] = 42
net[0].weight.data[0]
tensor([42.0000, 1.0000, 8.7878, -6.3791])
Tied Parameters
# We need to give the shared layer a name so that we can refer to its
# parameters.
shared = nn.Linear(8, 8)
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(),
                    shared, nn.ReLU(),
                    shared, nn.ReLU(),
                    nn.Linear(8, 1))
net(X)
# Check whether the parameters are the same
print(net[2].weight.data[0] == net[4].weight.data[0])
net[2].weight.data[0, 0] = 100
# Make sure that they are actually the same object rather than just having
# the same value
print(net[2].weight.data[0] == net[4].weight.data[0])
tensor([True, True, True, True, True, True, True, True])
tensor([True, True, True, True, True, True, True, True])
My own understanding: shared acts like a global variable, so every position where it appears refers to the same layer object; the sketch below confirms this.
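Since net[2] and net[4] are literally the same object, gradients from both positions accumulate into the one shared parameter:
net(X).sum().backward()
print(net[2].weight.grad is net[4].weight.grad)  # True: one and the same tensor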
Custom Layers
In practice this is much like defining a custom block.
Layers Without Parameters
import torch
import torch.nn.functional as F
from torch import nn
class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()
CenteredLayer likewise inherits from nn.Module.
layer = CenteredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))
tensor([-2., -1., 0., 1., 2.])
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
Y = net(torch.rand(4, 8))
Y.mean()
tensor(-2.7940e-09, grad_fn=<MeanBackward0>)
Layers With Parameters
class MyLinear(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        # Note: going through .data bypasses autograd (see the caveat at
        # the end of this subsection).
        linear = torch.matmul(X, self.weight.data) + self.bias.data
        return F.relu(linear)
linear = MyLinear(5, 3)
linear.weight
The layer's weight and bias are both instances of nn.Parameter (model parameters); don't confuse them with the constructor arguments in_units and units, which are 'parameters' in a different sense.
Parameter containing:
tensor([[-0.7678, 0.7135, -0.2640],
[ 0.3996, 0.0243, 0.8946],
[-0.0224, -1.2642, -1.3209],
[ 2.2299, 0.1600, -0.0154],
[-0.3228, -0.8990, 0.3771]], requires_grad=True)
linear(torch.rand(2, 5))
tensor([[0.0000, 0.0000, 0.0000],
[0.0315, 0.0000, 0.0000]])
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(torch.rand(2, 64))
tensor([[11.8317],
[12.3625]])
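A caveat on MyLinear above: because forward goes through .data, autograd never records the matrix multiplication, so weight and bias would receive no gradients during training. A minimal trainable variant (the class name MyLinearTrainable is our own):
class MyLinearTrainable(nn.Module):
    def __init__(self, in_units, units):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        # Using the parameters directly keeps the ops on the autograd graph.
        return F.relu(torch.matmul(X, self.weight) + self.bias)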
Reading and Writing Files
Loading and Saving Tensors
import torch
from torch import nn
from torch.nn import functional as F
x = torch.arange(4)
torch.save(x, 'x-file')
x2 = torch.load('x-file')
x2
tensor([0, 1, 2, 3])
We can also save a list of tensors:
y = torch.zeros(4)
torch.save([x, y],'x-files')
x2, y2 = torch.load('x-files')
(x2, y2)
(tensor([0, 1, 2, 3]), tensor([0., 0., 0., 0.]))
We can also save a dictionary:
mydict = {'x': x, 'y': y}
torch.save(mydict, 'mydict')
mydict2 = torch.load('mydict')
mydict2
{'x': tensor([0, 1, 2, 3]), 'y': tensor([0., 0., 0., 0.])}
Loading and Saving Model Parameters
class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))
net = MLP()
X = torch.randn(size=(2, 20))
Y = net(X)
torch.save(net.state_dict(), 'mlp.params')
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
MLP(
(hidden): Linear(in_features=20, out_features=256, bias=True)
(output): Linear(in_features=256, out_features=10, bias=True)
)
Y_clone = clone(X)
Y_clone == Y
tensor([[True, True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True, True]])
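One more detail worth knowing: torch.load accepts a map_location argument, useful when a checkpoint saved on a GPU machine must be loaded on CPU. A minimal sketch reusing the filename above:
params = torch.load('mlp.params', map_location=torch.device('cpu'))
clone.load_state_dict(params)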