Model inspection & parameter viewing | Loading pretrained models | Model modification | Parameter freezing

import torch
import torch.nn as nn


class Mymodel(nn.Module):  # a small toy network used throughout the examples below
    def __init__(self):
        super().__init__()
        self.Layer1 = nn.Sequential(
            nn.Linear(3, 4),
            nn.Linear(4, 3)
        )
        self.Layer2 = nn.Linear(3, 6)
        self.Layer3 = nn.Sequential(
            nn.Linear(6, 7),
            nn.Linear(7, 5)
        )

    def forward(self, x):
        x = self.Layer1(x)
        x = self.Layer2(x)
        x = self.Layer3(x)
        return x


net = Mymodel()
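A quick sanity check (my addition, not in the original post): a dummy batch of 2 confirms the layer dimensions chain correctly, 3 -> 4 -> 3 -> 6 -> 7 -> 5:

x = torch.randn(2, 3)   # dummy batch: 2 samples, 3 features
print(net(x).shape)     # torch.Size([2, 5])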

------------------------- Viewing model structure & parameters --------------------

print(net)  # print the model structure
# Output
Mymodel(
  (Layer1): Sequential(
    (0): Linear(in_features=3, out_features=4, bias=True)
    (1): Linear(in_features=4, out_features=3, bias=True)
  )
  (Layer2): Linear(in_features=3, out_features=6, bias=True)
  (Layer3): Sequential(
    (0): Linear(in_features=6, out_features=7, bias=True)
    (1): Linear(in_features=7, out_features=5, bias=True)
  )
)
print(net.Layer3)  # print a single submodule
# Output
Sequential(
  (0): Linear(in_features=6, out_features=7, bias=True)
  (1): Linear(in_features=7, out_features=5, bias=True)
)
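Because Layer3 is an nn.Sequential, its sub-layers can also be indexed directly (a small extra illustration, not in the original):

print(net.Layer3[0])               # Linear(in_features=6, out_features=7, bias=True)
print(net.Layer3[0].weight.shape)  # torch.Size([7, 6]): (out_features, in_features)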
for layer in net.modules():  # types of all modules, recursively (includes the model itself)
    print(type(layer))

# Output
<class '__main__.Mymodel'>
<class 'torch.nn.modules.container.Sequential'>
<class 'torch.nn.modules.linear.Linear'>
<class 'torch.nn.modules.linear.Linear'>
<class 'torch.nn.modules.linear.Linear'>
<class 'torch.nn.modules.container.Sequential'>
<class 'torch.nn.modules.linear.Linear'>
<class 'torch.nn.modules.linear.Linear'>

for name, layer in net.named_modules():  # names and types of all modules (the root has an empty name)
    print(name, type(layer))

# Output
 <class '__main__.Mymodel'>
Layer1 <class 'torch.nn.modules.container.Sequential'>
Layer1.0 <class 'torch.nn.modules.linear.Linear'>
Layer1.1 <class 'torch.nn.modules.linear.Linear'>
Layer2 <class 'torch.nn.modules.linear.Linear'>
Layer3 <class 'torch.nn.modules.container.Sequential'>
Layer3.0 <class 'torch.nn.modules.linear.Linear'>
Layer3.1 <class 'torch.nn.modules.linear.Linear'>
for layer in net.children():  # direct children only, without recursion
    print(layer)

# Output
Sequential(
  (0): Linear(in_features=3, out_features=4, bias=True)
  (1): Linear(in_features=4, out_features=3, bias=True)
)
Linear(in_features=3, out_features=6, bias=True)
Sequential(
  (0): Linear(in_features=6, out_features=7, bias=True)
  (1): Linear(in_features=7, out_features=5, bias=True)
)
for name, layer in net.named_children():  # named direct children
    print(name, layer)

# Output
Layer1 Sequential(
  (0): Linear(in_features=3, out_features=4, bias=True)
  (1): Linear(in_features=4, out_features=3, bias=True)
)
Layer2 Linear(in_features=3, out_features=6, bias=True)
Layer3 Sequential(
  (0): Linear(in_features=6, out_features=7, bias=True)
  (1): Linear(in_features=7, out_features=5, bias=True)
)
for param in net.parameters():  # shapes of all learnable parameters
    print(param.shape)

# Output
torch.Size([4, 3])
torch.Size([4])
torch.Size([3, 4])
torch.Size([3])
torch.Size([6, 3])
torch.Size([6])
torch.Size([7, 6])
torch.Size([7])
torch.Size([5, 7])
torch.Size([5])

for name, param in net.named_parameters():  # parameter names and shapes
    print(name, param.shape)

# Output (both weights and biases appear)
Layer1.0.weight torch.Size([4, 3])
Layer1.0.bias torch.Size([4])
Layer1.1.weight torch.Size([3, 4])
Layer1.1.bias torch.Size([3])
Layer2.weight torch.Size([6, 3])
Layer2.bias torch.Size([6])
Layer3.0.weight torch.Size([7, 6])
Layer3.0.bias torch.Size([7])
Layer3.1.weight torch.Size([5, 7])
Layer3.1.bias torch.Size([5])
for key, value in net.state_dict().items():  # the same information via the state dict
    print(key, value.shape)

# Output
Layer1.0.weight torch.Size([4, 3])
Layer1.0.bias torch.Size([4])
Layer1.1.weight torch.Size([3, 4])
Layer1.1.bias torch.Size([3])
Layer2.weight torch.Size([6, 3])
Layer2.bias torch.Size([6])
Layer3.0.weight torch.Size([7, 6])
Layer3.0.bias torch.Size([7])
Layer3.1.weight torch.Size([5, 7])
Layer3.1.bias torch.Size([5])
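A common follow-up, added here for completeness: counting parameters with numel():

total = sum(p.numel() for p in net.parameters())
print(total)  # 144 for this toy model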

--------------------- Saving and loading models ----------------------

import torchvision.models as models

alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
torch.save(alexNet.state_dict(), 'alexNet_weight.pth')  # save only the parameters
torch.save(alexNet, 'alexNet.pth')  # save the whole model (structure + parameters)
net1 = torch.load("alexNet.pth")  # load structure + parameters

net2 = models.alexnet()  # build the model first, then load the parameters into it
net2.load_state_dict(torch.load('alexNet_weight.pth'))
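Two loading options worth knowing (an aside, not from the original post): map_location controls which device the tensors are loaded onto, and strict=False tolerates key mismatches after the architecture has been modified:

state = torch.load('alexNet_weight.pth', map_location='cpu')  # load weights onto the CPU
net2.load_state_dict(state, strict=False)  # skip keys that no longer match the model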

---------------------- Modifying the model: deleting / replacing / adding layers -------------------------

alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet)
print("---------------- After modification ----------------------------")
del alexNet.classifier  # delete an entire top-level block
print(alexNet)

# Output
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=9216, out_features=4096, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=4096, out_features=4096, bias=True)
    (5): ReLU(inplace=True)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
---------------- After modification ----------------------------
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
)
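A caution worth adding: AlexNet's forward still calls self.classifier, so after del the model can no longer run a forward pass. To keep the model callable while effectively removing the block, replace it with a pass-through instead:

alexNet = models.alexnet(pretrained=True)
alexNet.classifier = nn.Identity()  # forward() now returns the flattened avgpool features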
alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet)
print("---------------- After modification ----------------------------")
del alexNet.classifier[6]  # delete one sub-layer inside a block
print(alexNet)
# Output
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=9216, out_features=4096, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=4096, out_features=4096, bias=True)
    (5): ReLU(inplace=True)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
---------------- After modification ----------------------------
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=9216, out_features=4096, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=4096, out_features=4096, bias=True)
    (5): ReLU(inplace=True)
  )
)
alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet.classifier)
print("---------------- After modification ----------------------------")
alexNet.classifier = alexNet.classifier[:-2]  # drop the last two layers: slice, then assign back
print(alexNet.classifier)

# Output
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1000, bias=True)
)
---------------- After modification ----------------------------
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
)
alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet.classifier)
print("---------------- After modification ----------------------------")
alexNet.classifier[6] = nn.Linear(in_features=4096, out_features=1024)  # replace the last layer
print(alexNet.classifier)

# Output
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1000, bias=True)
)
---------------- After modification ----------------------------
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1024, bias=True)
)
alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet.classifier)
print("---------------- After modification ----------------------------")
alexNet.classifier.add_module("7", nn.ReLU(inplace=True))  # append one layer at a time
alexNet.classifier.add_module("8", nn.Linear(in_features=1024, out_features=20))
# NOTE: as printed below, layer (6) outputs 1000 features, so in_features=1024 would fail at
# forward time; it only matches if layer (6) was first replaced as in the previous example.
print(alexNet.classifier)
# Output
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1000, bias=True)
)
---------------- After modification ----------------------------
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1000, bias=True)
  (7): ReLU(inplace=True)
  (8): Linear(in_features=1024, out_features=20, bias=True)
)
alexNet = models.alexnet(pretrained=True)  # load the model with pretrained weights
print(alexNet.classifier)
print("---------------- After modification ----------------------------")
block = nn.Sequential(nn.ReLU(inplace=True),
                      nn.Linear(in_features=1024, out_features=20)  # same in_features caveat as above
                      )
alexNet.classifier.add_module("block", block)  # append several layers at once as a named block
print(alexNet)
# Output
Sequential(
  (0): Dropout(p=0.5, inplace=False)
  (1): Linear(in_features=9216, out_features=4096, bias=True)
  (2): ReLU(inplace=True)
  (3): Dropout(p=0.5, inplace=False)
  (4): Linear(in_features=4096, out_features=4096, bias=True)
  (5): ReLU(inplace=True)
  (6): Linear(in_features=4096, out_features=1000, bias=True)
)
---------------- After modification ----------------------------
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace=True)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace=True)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace=True)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace=True)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
  (classifier): Sequential(
    (0): Dropout(p=0.5, inplace=False)
    (1): Linear(in_features=9216, out_features=4096, bias=True)
    (2): ReLU(inplace=True)
    (3): Dropout(p=0.5, inplace=False)
    (4): Linear(in_features=4096, out_features=4096, bias=True)
    (5): ReLU(inplace=True)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
    (block): Sequential(
      (0): ReLU(inplace=True)
      (1): Linear(in_features=1024, out_features=20, bias=True)
    )
  )
)
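Finally, the title also promises parameter freezing, which the post never reaches. A minimal sketch (my addition), assuming we keep the pretrained features fixed and fine-tune only the classifier:

alexNet = models.alexnet(pretrained=True)
for param in alexNet.features.parameters():
    param.requires_grad = False  # frozen: no gradients will be computed or applied

# hand the optimizer only the parameters that are still trainable
optimizer = torch.optim.SGD(
    (p for p in alexNet.parameters() if p.requires_grad), lr=1e-3)

for name, param in alexNet.named_parameters():
    print(name, param.requires_grad)  # features.* -> False, classifier.* -> True

Frozen layers still participate in the forward pass; they simply stop receiving updates.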