For Bilibili course 16_P1
'''Custom network blocks'''
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    # Declare layers with model parameters; here, two fully connected layers.
    def __init__(self):
        # Call the constructor of the parent class Module to perform the
        # necessary initialization. This way, other arguments (e.g. model
        # parameters, params, introduced later) can also be passed at
        # instantiation time.
        super().__init__()
        self.hidden = nn.Linear(20, 256)  # hidden layer
        self.out = nn.Linear(256, 10)     # output layer

    # Define the forward pass: how to compute the required output from the input X.
    def forward(self, X):
        # Note: we use the functional version of ReLU, defined in nn.functional.
        return self.out(F.relu(self.hidden(X)))
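
# Quick check I added (not in the course code): submodules assigned as
# attributes in __init__ are auto-registered, so parameters() finds them.
# net = MLP()
# print(sum(p.numel() for p in net.parameters()))  # (20*256+256) + (256*10+10) = 7946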
class MySequential(nn.Module):  # behaves like nn.Sequential
    def __init__(self, *args):  # *args collects a variable number of arguments
        super().__init__()
        for idx, block in enumerate(args):
            # block is an instance of a Module subclass. We store it in the
            # member variable _modules of the Module class, an OrderedDict
            # whose keys must be strings, so we key by index.
            self._modules[str(idx)] = block

    def forward(self, X):
        # The OrderedDict guarantees iteration in insertion order.
        for block in self._modules.values():
            X = block(X)
        return X
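
# Side note I added (not in the course code): nn.Sequential itself also accepts
# an OrderedDict, which gives the sub-blocks readable names.
# from collections import OrderedDict
# named = nn.Sequential(OrderedDict([('hidden', nn.Linear(20, 256)),
#                                    ('act', nn.ReLU()),
#                                    ('out', nn.Linear(256, 10))]))
# print(named.hidden)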
class FixedHiddenMLP(nn.Module):
    def __init__(self):
        super().__init__()
        # Random weights that require no gradient, so they stay fixed
        # throughout training.
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        X = self.linear(X)  # linear computes XW^T + b with learned parameters
        # Use the constant parameters together with relu and mm.
        X = F.relu(torch.mm(X, self.rand_weight) + 1)
        # Reuse the fully connected layer: both applications share parameters.
        X = self.linear(X)
        # Control flow
        while X.abs().sum() > 1:
            X /= 2
        return X.sum()
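
# Minimal check I added: the Python control flow in forward runs on every call,
# which works because PyTorch executes eagerly.
# f = FixedHiddenMLP()
# print(f(torch.rand(2, 20)))  # a single scalar with |value| <= 1 after the halving loop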
class NestMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
                                 nn.Linear(64, 32), nn.ReLU())
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        return self.linear(self.net(X))
if __name__ == '__main__':
    # net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
    # net = MLP()
    # net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
    # net = FixedHiddenMLP()
    net = MySequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
    X = torch.rand(2, 20)
    print(net(X))
For Bilibili course 16_P2
import torch
from torch import nn
def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(),
                         nn.Linear(8, 4), nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        # Nest block1 here
        net.add_module(f'block {i}', block1())
    return net
def init_normal(m):
    if type(m) == nn.Linear:
        # The trailing _ means in place: the original weight is overwritten.
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

def init_constant(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

def xavier(m):  # Xavier init: keeps input and output variance equal per layer
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)  # _uniform_ samples from a uniform distribution

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

def my_init(m):
    if type(m) == nn.Linear:
        print("Init", *[(name, param.shape)
                        for name, param in m.named_parameters()][0])  # [0] is weight, [1] is bias
        nn.init.uniform_(m.weight, -10, 10)
        # Multiply by a boolean mask: entries with |w| < 5 are zeroed.
        m.weight.data *= m.weight.data.abs() >= 5
if __name__ == '__main__':
    X = torch.rand(size=(2, 4))
    # net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
    '''
    print(net(X))
    # Parameter access
    print(net[2].state_dict())  # net[0] is nn.Linear(4, 8), net[1] is nn.ReLU();
                                # this prints the parameters of the second Linear layer
    # Targeted parameters
    print(type(net[2].bias))
    print(net[2].bias)
    print(net[2].bias.data)
    print(net.state_dict()['2.bias'].data)  # equivalent to the line above
    print(net[2].weight.grad)  # backward has not been called yet, so the gradient is still None
    # Access all parameters at once
    print(*[(name, param.shape) for name, param in net[0].named_parameters()])
    print(*[(name, param.shape) for name, param in net.named_parameters()])
    '''
    '''
    # Nested parameter access
    rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
    print(rgnet(X))
    print(rgnet)
    # Index into nested blocks as if indexing nested lists
    print(rgnet[0][1][0].bias.data)
    '''
    '''
    # Built-in initialization for all layers
    net.apply(init_normal)  # apply runs the function on every layer of net
    print(net[0].weight.data[0], net[0].bias.data[0])
    net.apply(init_constant)
    print(net[0].weight.data[0], net[0].bias.data[0])
    '''
    '''
    # Built-in initialization for specific layers
    net[0].apply(xavier)
    net[2].apply(init_42)
    print(net[0].weight.data[1])
    print(net[2].weight.data)
    '''
    '''
    # Custom initialization
    net.apply(my_init)
    print(net[0].weight[:2])
    '''
    '''
    # Set parameters directly
    net[0].weight.data[:] += 1
    net[0].weight.data[0, 0] = 42
    print(net[0].weight.data[0])
    '''
    # Tied parameters
    # Give the shared layer a name so that we can refer to its parameters.
    shared = nn.Linear(8, 8)
    net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(),
                        shared, nn.ReLU(),
                        shared, nn.ReLU(),
                        nn.Linear(8, 1))
    print(net(X))
    # Check whether the parameters are equal
    print(net[2].weight.data[0, 1] == net[4].weight.data[0, 1])
    net[2].weight.data[0, 0] = 100
    # Make sure they are actually the same object, not just equal in value
    print(net[2].weight.data[0] == net[4].weight.data[0])  # data[0] is row 0
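    # Sketch I added (not from the video): since positions 2 and 4 are the same
    # object, gradients from both uses accumulate into one .grad buffer.
    # net.zero_grad()
    # net(X).sum().backward()
    # print(shared.weight.grad.shape)  # a single shared gradient, torch.Size([8, 8])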
For Bilibili course 16_P3
'''Custom layers'''
import torch
import torch.nn.functional as F
from torch import nn
class CenteredLayer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, X):
        return X - X.mean()
class MyLinear(nn.Module):
    def __init__(self, in_units, units):  # in_units: input dim, units: output dim
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_units, units))
        self.bias = nn.Parameter(torch.randn(units,))

    def forward(self, X):
        # Use the parameters directly (not .data) so autograd can track them.
        linear = torch.matmul(X, self.weight) + self.bias
        return F.relu(linear)
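
# Note I added: wrapping the tensors in nn.Parameter is what registers them, so
# MyLinear shows up in named_parameters()/state_dict() like a built-in layer.
# print([name for name, _ in MyLinear(5, 3).named_parameters()])  # ['weight', 'bias']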
if __name__ == '__main__':
    '''
    # Without parameters
    layer = CenteredLayer()
    print(layer(torch.FloatTensor([1, 2, 3, 4, 5])))
    net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
    y = net(torch.rand(4, 8))
    print(y.mean())  # y.shape is [4, 128]; the mean is numerically close to 0
    '''
    '''
    # With parameters (input and output dimensions)
    linear = MyLinear(5, 3)  # input dim 5, output dim 3
    print(linear.weight)
    print(linear(torch.rand(2, 5)))
    '''
    net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
    print(net(torch.normal(0, 0.1, (2, 64))))
For Bilibili course 16_P4
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(20, 256)
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        return self.output(F.relu(self.hidden(x)))
if __name__ == '__main__':
    # x = torch.arange(4)
    # y = torch.zeros(4)
    '''
    torch.save(x, 'x-file')
    x2 = torch.load('x-file')
    print(x2)
    '''
    '''
    # A list of tensors
    torch.save([x, y], 'xy-file')
    x2, y2 = torch.load('xy-file')
    print((x2, y2))
    '''
    '''
    # A dict of tensors
    mydict = {'x': x, 'y': y}
    torch.save(mydict, 'mydict')
    mydict2 = torch.load('mydict')
    print(mydict2)
    '''
    net = MLP()
    X = torch.randn(size=(2, 20))  # same as torch.randn(2, 20)
    Y = net(X)
    # state_dict() maps parameter names (strings) to tensors; save it as a dict.
    torch.save(net.state_dict(), 'mlp.params')
    clone = MLP()  # build a fresh MLP with randomly initialized parameters
    clone.load_state_dict(torch.load('mlp.params'))  # overwrite the random parameters
    print(clone.eval())
    Y_clone = clone(X)
    print(Y_clone == Y)
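    # Sketch I added (assumptions beyond the course): a training checkpoint
    # usually bundles model and optimizer state so a run can be resumed.
    # trainer = torch.optim.SGD(net.parameters(), lr=0.1)
    # torch.save({'model': net.state_dict(), 'optim': trainer.state_dict()}, 'ckpt.pt')
    # state = torch.load('ckpt.pt')
    # net.load_state_dict(state['model'])
    # trainer.load_state_dict(state['optim'])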
For Bilibili course 17_P1
import torch
from torch import nn
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu()."""
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')

def try_all_gpus():
    """Return all available GPUs, or [cpu(),] if there is no GPU."""
    devices = [torch.device(f'cuda:{i}')
               for i in range(torch.cuda.device_count())]
    return devices if devices else [torch.device('cpu')]
# torch.device('cpu')     # use the CPU
# torch.device('cuda')    # defaults to GPU 0
# torch.device('cuda:1')  # use GPU 1
# print(torch.cuda.device_count())
# print(try_gpu())
# print(try_gpu(10))
# print(try_all_gpus())
'''
# Create tensors on the GPU
x = torch.tensor([1, 2, 3])
print(x.device)  # tensors live on the CPU by default
X = torch.ones(2, 3, device=try_gpu())
print(X)
'''
# Run a neural network on the GPU
X = torch.ones(2, 3, device=try_gpu())
net = nn.Sequential(nn.Linear(3, 1))
net = net.to(device=try_gpu())
print(net(X))
print(net[0].weight.data.device)
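
# Sketch I added (not from the video): operands must live on the same device;
# mixing devices in one op raises an error, so copy explicitly with .to().
Y = torch.rand(2, 3)   # created on the CPU by default
Z = Y.to(try_gpu())    # explicit copy to the GPU (returns Y itself on CPU-only machines)
print((X + Z).device)  # both operands now share a device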