import torch
import torch. nn as nn
class Mymodel(torch.nn.Module):
    """Toy 3-layer model used to demonstrate PyTorch's module/parameter
    inspection APIs (modules, named_modules, children, parameters, state_dict).

    Input:  tensor whose last dimension is 3.
    Output: tensor whose last dimension is 5.
    """

    def __init__(self):
        super().__init__()
        # Two stacked Linear layers grouped in a Sequential container,
        # so the printed tree shows both containers and leaf modules.
        self.Layer1 = nn.Sequential(
            nn.Linear(3, 4),
            nn.Linear(4, 3),
        )
        self.Layer2 = nn.Linear(3, 6)
        self.Layer3 = nn.Sequential(
            nn.Linear(6, 7),
            nn.Linear(7, 5),
        )

    def forward(self, x):
        """Apply Layer1 -> Layer2 -> Layer3 in order and return the result."""
        x = self.Layer1(x)
        x = self.Layer2(x)
        x = self.Layer3(x)
        return x
net = Mymodel()

# ------------------------- 模型参数查看 (inspecting model parameters) -------------------------
# The original bare dashed header line was not a comment and broke the script.
print(net)
Mymodel(
( Layer1) : Sequential(
( 0 ) : Linear( in_features= 3 , out_features= 4 , bias= True )
( 1 ) : Linear( in_features= 4 , out_features= 3 , bias= True )
)
( Layer2) : Linear( in_features= 3 , out_features= 6 , bias= True )
( Layer3) : Sequential(
( 0 ) : Linear( in_features= 6 , out_features= 7 , bias= True )
( 1 ) : Linear( in_features= 7 , out_features= 5 , bias= True )
)
)
# Sub-modules are plain attributes, so they can be printed individually.
print(net.Layer3)
Sequential(
( 0 ) : Linear( in_features= 6 , out_features= 7 , bias= True )
( 1 ) : Linear( in_features= 7 , out_features= 5 , bias= True )
)
# modules() walks the module tree recursively, yielding the model itself,
# each Sequential container, and every Linear leaf.
# (Loop body re-indented: the pasted original had no indentation.)
for layer in net.modules():
    print(type(layer))
< class '__main__.Mymodel' >
< class 'torch.nn.modules.container.Sequential' >
< class 'torch.nn.modules.linear.Linear' >
< class 'torch.nn.modules.linear.Linear' >
< class 'torch.nn.modules.linear.Linear' >
< class 'torch.nn.modules.container.Sequential' >
< class 'torch.nn.modules.linear.Linear' >
< class 'torch.nn.modules.linear.Linear' >
# named_modules() is modules() plus each module's dotted qualified name
# (e.g. "Layer1.0"); the root model has the empty name.
for name, layer in net.named_modules():
    print(name, type(layer))
< class '__main__.Mymodel' >
Layer1 < class 'torch.nn.modules.container.Sequential' >
Layer1. 0 < class 'torch.nn.modules.linear.Linear' >
Layer1. 1 < class 'torch.nn.modules.linear.Linear' >
Layer2 < class 'torch.nn.modules.linear.Linear' >
Layer3 < class 'torch.nn.modules.container.Sequential' >
Layer3. 0 < class 'torch.nn.modules.linear.Linear' >
Layer3. 1 < class 'torch.nn.modules.linear.Linear' >
# children() yields only the direct sub-modules — no recursion into
# the Sequential containers.
for layer in net.children():
    print(layer)
Sequential(
( 0 ) : Linear( in_features= 3 , out_features= 4 , bias= True )
( 1 ) : Linear( in_features= 4 , out_features= 3 , bias= True )
)
Linear( in_features= 3 , out_features= 6 , bias= True )
Sequential(
( 0 ) : Linear( in_features= 6 , out_features= 7 , bias= True )
( 1 ) : Linear( in_features= 7 , out_features= 5 , bias= True )
)
# named_children() additionally reports the attribute name of each direct child.
for name, layer in net.named_children():
    print(name, layer)
Layer1 Sequential(
( 0 ) : Linear( in_features= 3 , out_features= 4 , bias= True )
( 1 ) : Linear( in_features= 4 , out_features= 3 , bias= True )
)
Layer2 Linear( in_features= 3 , out_features= 6 , bias= True )
Layer3 Sequential(
( 0 ) : Linear( in_features= 6 , out_features= 7 , bias= True )
( 1 ) : Linear( in_features= 7 , out_features= 5 , bias= True )
)
# parameters() yields every learnable tensor (weight then bias of each
# Linear), recursively through all containers.
for param in net.parameters():
    print(param.shape)
torch. Size( [ 4 , 3 ] )
torch. Size( [ 4 ] )
torch. Size( [ 3 , 4 ] )
torch. Size( [ 3 ] )
torch. Size( [ 6 , 3 ] )
torch. Size( [ 6 ] )
torch. Size( [ 7 , 6 ] )
torch. Size( [ 7 ] )
torch. Size( [ 5 , 7 ] )
torch. Size( [ 5 ] )
# named_parameters() adds each parameter's dotted qualified name,
# e.g. "Layer1.0.weight".
for name, param in net.named_parameters():
    print(name, param.shape)
Layer1. 0 . weight torch. Size( [ 4 , 3 ] )
Layer1. 0 . bias torch. Size( [ 4 ] )
Layer1. 1 . weight torch. Size( [ 3 , 4 ] )
Layer1. 1 . bias torch. Size( [ 3 ] )
Layer2. weight torch. Size( [ 6 , 3 ] )
Layer2. bias torch. Size( [ 6 ] )
Layer3. 0 . weight torch. Size( [ 7 , 6 ] )
Layer3. 0 . bias torch. Size( [ 7 ] )
Layer3. 1 . weight torch. Size( [ 5 , 7 ] )
Layer3. 1 . bias torch. Size( [ 5 ] )
# state_dict() maps qualified names to parameter/buffer tensors — the
# serializable snapshot consumed by torch.save / load_state_dict.
for key, value in net.state_dict().items():
    print(key, value.shape)
Layer1. 0 . weight torch. Size( [ 4 , 3 ] )
Layer1. 0 . bias torch. Size( [ 4 ] )
Layer1. 1 . weight torch. Size( [ 3 , 4 ] )
Layer1. 1 . bias torch. Size( [ 3 ] )
Layer2. weight torch. Size( [ 6 , 3 ] )
Layer2. bias torch. Size( [ 6 ] )
Layer3. 0 . weight torch. Size( [ 7 , 6 ] )
Layer3. 0 . bias torch. Size( [ 7 ] )
Layer3. 1 . weight torch. Size( [ 5 , 7 ] )
Layer3. 1 . bias torch. Size( [ 5 ] )
# --------------------- 模型的保存与加载 (saving & loading models) ----------------------
import torchvision.models as models

# NOTE(review): `alexnet(True)` uses the deprecated positional `pretrained`
# flag; recent torchvision expects `weights=...` instead — confirm the
# installed version before reusing this snippet.
alexNet = models.alexnet(True)
# Option 1: save only the parameters (the recommended, portable form).
torch.save(alexNet.state_dict(), 'alexNet_weight.pth')
# Option 2: pickle the whole module object (fragile across code/version changes,
# and torch.load of a pickled module executes arbitrary code — trusted files only).
torch.save(alexNet, 'alexNet.pth')
# Loading a whole-module file returns a ready-to-use model ...
net1 = torch.load("alexNet.pth")
# ... while a state_dict must be loaded into a freshly constructed architecture.
net2 = models.alexnet()
net2.load_state_dict(torch.load('alexNet_weight.pth'))
# ---------------------- 模型的修改增加删除 (modify / add / delete parts) -------------------------
alexNet = models.alexnet(True)
print(alexNet)
print("-----------------修改后---------------------------")
# Deleting the attribute removes the entire classifier head from the model.
del alexNet.classifier
print(alexNet)
AlexNet(
( features) : Sequential(
( 0 ) : Conv2d( 3 , 64 , kernel_size= ( 11 , 11 ) , stride= ( 4 , 4 ) , padding= ( 2 , 2 ) )
( 1 ) : ReLU( inplace= True )
( 2 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 3 ) : Conv2d( 64 , 192 , kernel_size= ( 5 , 5 ) , stride= ( 1 , 1 ) , padding= ( 2 , 2 ) )
( 4 ) : ReLU( inplace= True )
( 5 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 6 ) : Conv2d( 192 , 384 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 7 ) : ReLU( inplace= True )
( 8 ) : Conv2d( 384 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 9 ) : ReLU( inplace= True )
( 10 ) : Conv2d( 256 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 11 ) : ReLU( inplace= True )
( 12 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
)
( avgpool) : AdaptiveAvgPool2d( output_size= ( 6 , 6 ) )
( classifier) : Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
)
- - - - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - - - - -
AlexNet(
( features) : Sequential(
( 0 ) : Conv2d( 3 , 64 , kernel_size= ( 11 , 11 ) , stride= ( 4 , 4 ) , padding= ( 2 , 2 ) )
( 1 ) : ReLU( inplace= True )
( 2 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 3 ) : Conv2d( 64 , 192 , kernel_size= ( 5 , 5 ) , stride= ( 1 , 1 ) , padding= ( 2 , 2 ) )
( 4 ) : ReLU( inplace= True )
( 5 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 6 ) : Conv2d( 192 , 384 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 7 ) : ReLU( inplace= True )
( 8 ) : Conv2d( 384 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 9 ) : ReLU( inplace= True )
( 10 ) : Conv2d( 256 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 11 ) : ReLU( inplace= True )
( 12 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
)
( avgpool) : AdaptiveAvgPool2d( output_size= ( 6 , 6 ) )
)
alexNet = models.alexnet(True)
print(alexNet)
print("-----------------修改后---------------------------")
# nn.Sequential supports __delitem__, so a single sub-layer (the final
# 4096->1000 Linear) can be removed by index.
del alexNet.classifier[6]
print(alexNet)
AlexNet(
( features) : Sequential(
( 0 ) : Conv2d( 3 , 64 , kernel_size= ( 11 , 11 ) , stride= ( 4 , 4 ) , padding= ( 2 , 2 ) )
( 1 ) : ReLU( inplace= True )
( 2 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 3 ) : Conv2d( 64 , 192 , kernel_size= ( 5 , 5 ) , stride= ( 1 , 1 ) , padding= ( 2 , 2 ) )
( 4 ) : ReLU( inplace= True )
( 5 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 6 ) : Conv2d( 192 , 384 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 7 ) : ReLU( inplace= True )
( 8 ) : Conv2d( 384 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 9 ) : ReLU( inplace= True )
( 10 ) : Conv2d( 256 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 11 ) : ReLU( inplace= True )
( 12 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
)
( avgpool) : AdaptiveAvgPool2d( output_size= ( 6 , 6 ) )
( classifier) : Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
)
- - - - - - - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - -
AlexNet(
( features) : Sequential(
( 0 ) : Conv2d( 3 , 64 , kernel_size= ( 11 , 11 ) , stride= ( 4 , 4 ) , padding= ( 2 , 2 ) )
( 1 ) : ReLU( inplace= True )
( 2 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 3 ) : Conv2d( 64 , 192 , kernel_size= ( 5 , 5 ) , stride= ( 1 , 1 ) , padding= ( 2 , 2 ) )
( 4 ) : ReLU( inplace= True )
( 5 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 6 ) : Conv2d( 192 , 384 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 7 ) : ReLU( inplace= True )
( 8 ) : Conv2d( 384 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 9 ) : ReLU( inplace= True )
( 10 ) : Conv2d( 256 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 11 ) : ReLU( inplace= True )
( 12 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
)
( avgpool) : AdaptiveAvgPool2d( output_size= ( 6 , 6 ) )
( classifier) : Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
)
)
alexNet = models.alexnet(True)
print(alexNet.classifier)
print("----------------修改后----------------------------")
# Slicing an nn.Sequential returns a new Sequential; drop the last two
# layers (the final ReLU and Linear) in one step.
alexNet.classifier = alexNet.classifier[:-2]
print(alexNet.classifier)
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
- - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - - - - - - -
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
)
alexNet = models.alexnet(True)
print(alexNet.classifier)
print("----------------修改后----------------------------")
# Index assignment swaps in a new layer: the pretrained 4096->1000 head is
# replaced by a freshly initialized 4096->1024 Linear.
alexNet.classifier[6] = nn.Linear(in_features=4096, out_features=1024)
print(alexNet.classifier)
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
- - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - - - - - - -
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1024 , bias= True )
)
alexNet = models.alexnet(True)
print(alexNet.classifier)
print("----------------修改后----------------------------")
# add_module appends named children; string keys "7" and "8" continue the
# Sequential's numeric index sequence.
# NOTE(review): the new Linear expects 1024 inputs but follows a 1000-output
# layer — a forward pass through this classifier would fail; kept as in the demo.
alexNet.classifier.add_module("7", nn.ReLU(inplace=True))
alexNet.classifier.add_module("8", nn.Linear(in_features=1024, out_features=20))
print(alexNet.classifier)
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
- - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - - - - - - -
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
( 7 ) : ReLU( inplace= True )
( 8 ) : Linear( in_features= 1024 , out_features= 20 , bias= True )
)
alexNet = models.alexnet(True)
print(alexNet.classifier)
print("----------------修改后----------------------------")
# An entire nn.Sequential can be attached as a single (nested) child module.
block = nn.Sequential(
    nn.ReLU(inplace=True),
    nn.Linear(in_features=1024, out_features=20),
)
alexNet.classifier.add_module("block", block)
print(alexNet)
Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
)
- - - - - - - - - - - - - - - - 修改后- - - - - - - - - - - - - - - - - - - - - - - - - - - -
AlexNet(
( features) : Sequential(
( 0 ) : Conv2d( 3 , 64 , kernel_size= ( 11 , 11 ) , stride= ( 4 , 4 ) , padding= ( 2 , 2 ) )
( 1 ) : ReLU( inplace= True )
( 2 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 3 ) : Conv2d( 64 , 192 , kernel_size= ( 5 , 5 ) , stride= ( 1 , 1 ) , padding= ( 2 , 2 ) )
( 4 ) : ReLU( inplace= True )
( 5 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
( 6 ) : Conv2d( 192 , 384 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 7 ) : ReLU( inplace= True )
( 8 ) : Conv2d( 384 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 9 ) : ReLU( inplace= True )
( 10 ) : Conv2d( 256 , 256 , kernel_size= ( 3 , 3 ) , stride= ( 1 , 1 ) , padding= ( 1 , 1 ) )
( 11 ) : ReLU( inplace= True )
( 12 ) : MaxPool2d( kernel_size= 3 , stride= 2 , padding= 0 , dilation= 1 , ceil_mode= False )
)
( avgpool) : AdaptiveAvgPool2d( output_size= ( 6 , 6 ) )
( classifier) : Sequential(
( 0 ) : Dropout( p= 0.5 , inplace= False )
( 1 ) : Linear( in_features= 9216 , out_features= 4096 , bias= True )
( 2 ) : ReLU( inplace= True )
( 3 ) : Dropout( p= 0.5 , inplace= False )
( 4 ) : Linear( in_features= 4096 , out_features= 4096 , bias= True )
( 5 ) : ReLU( inplace= True )
( 6 ) : Linear( in_features= 4096 , out_features= 1000 , bias= True )
( block) : Sequential(
( 0 ) : ReLU( inplace= True )
( 1 ) : Linear( in_features= 1024 , out_features= 20 , bias= True )
)
)
)