用paddle复现darknet53
先写一个很基础的卷积层,并且加上正则化和图片的处理
ch_in表示输入通道数,ch_out表示输出通道数,kernel_size表示卷积核的边长,即卷积核大小为kernel_size×kernel_size
import paddle. nn. functional as F
class ConvBNLayer(paddle.nn.Layer):
    """Basic Conv2D + BatchNorm2D (+ optional leaky-ReLU) building block.

    Args:
        ch_in: number of input channels.
        ch_out: number of output channels.
        kernel_size: side length of the square convolution kernel
            (the kernel covers kernel_size ** 2 positions).
        stride: convolution stride.
        groups: number of convolution groups.
        padding: zero-padding added around the input.
        act: activation name; only "leaky" applies leaky-ReLU
            (negative_slope=0.1), any other value returns the
            batch-norm output unchanged.
    """

    def __init__(self, ch_in, ch_out, kernel_size=3, stride=1,
                 groups=1, padding=0, act="leaky"):
        super(ConvBNLayer, self).__init__()
        # Conv bias is disabled because BatchNorm supplies the affine shift.
        self.conv = paddle.nn.Conv2D(
            in_channels=ch_in,
            out_channels=ch_out,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(0., 0.02)),
            bias_attr=False
        )
        # L2Decay(0.) exempts the BN scale/shift from global weight decay.
        self.batch_norm = paddle.nn.BatchNorm2D(
            num_features=ch_out,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(0., 0.02),
                regularizer=paddle.regularizer.L2Decay(0.)),
            bias_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(0.0),
                regularizer=paddle.regularizer.L2Decay(0.))
        )
        self.act = act

    def forward(self, inputs):
        out = self.conv(inputs)
        # BUG FIX: batch-norm must consume the convolution output, not the
        # raw inputs (the original passed `inputs` here, which dropped the
        # conv result and mismatched BN's num_features=ch_out).
        out = self.batch_norm(out)
        if self.act == 'leaky':
            out = F.leaky_relu(x=out, negative_slope=0.1)
        return out
让图片大小尺寸减半的层
class DownSample(paddle.nn.Layer):
    """Halve the spatial resolution using a strided ConvBNLayer.

    Defaults to a 3x3 convolution with stride 2 and padding 1, which
    exactly halves height and width.
    """

    def __init__(self, ch_in, ch_out, kernel_size=3, stride=2, padding=1):
        super(DownSample, self).__init__()
        self.convn_bn_layer = ConvBNLayer(
            ch_in=ch_in,
            ch_out=ch_out,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding)
        # Recorded so callers can query the stage's output width.
        self.ch_out = ch_out

    def forward(self, inputs):
        return self.convn_bn_layer(inputs)
基础残差层,表示对输入进行两次卷积后,将输入与两次卷积后的输出加在一起
class BasicBlock(paddle.nn.Layer):
    """Darknet residual block: 1x1 conv, then 3x3 conv, plus a skip add.

    The second convolution doubles the channel count to ch_out * 2, so
    the element-wise addition requires ch_in == ch_out * 2 (which every
    caller in this backbone guarantees).
    """

    def __init__(self, ch_in, ch_out):
        super(BasicBlock, self).__init__()
        # 1x1 bottleneck reducing to ch_out channels.
        self.conv1 = ConvBNLayer(
            ch_in=ch_in,
            ch_out=ch_out,
            kernel_size=1,
            stride=1,
            padding=0)
        # 3x3 convolution expanding back to ch_out * 2 channels.
        self.conv2 = ConvBNLayer(
            ch_in=ch_out,
            ch_out=ch_out * 2,
            kernel_size=3,
            stride=1,
            padding=1)

    def forward(self, inputs):
        residual = self.conv2(self.conv1(inputs))
        return paddle.add(x=inputs, y=residual)
darknet网络分成了好几个阶段,每个阶段对每个残差块会做好几次。这里对于每个阶段的残差块,进行多次(count)循环处理
class LayerWarp(paddle.nn.Layer):
    """One Darknet stage: `count` BasicBlocks chained in sequence.

    The first block maps ch_in -> ch_out (expanding back to ch_out * 2);
    the remaining count - 1 blocks all map ch_out * 2 -> ch_out.
    NOTE(review): `is_test` is accepted for signature compatibility but
    is never read.
    """

    def __init__(self, ch_in, ch_out, count, is_test=True):
        super(LayerWarp, self).__init__()
        self.basicblock0 = BasicBlock(ch_in, ch_out)
        self.res_out_list = []
        for idx in range(1, count):
            block = self.add_sublayer('basic_block_%d' % (idx),
                                      BasicBlock(ch_out * 2, ch_out))
            self.res_out_list.append(block)

    def forward(self, inputs):
        out = self.basicblock0(inputs)
        for block in self.res_out_list:
            out = block(out)
        return out
整个darknet53的整体
层数为2+(1+2+8+8+4)*2+4=52
然后再加一个全连接层就是53
# Residual-block counts for each of the five stages of the 53-layer variant.
DarkNet_cfg= { 53 : ( [ 1 , 2 , 8 , 8 , 4 ] ) }
class DarkNet53_conv_body(paddle.nn.Layer):
    """DarkNet-53 convolutional backbone.

    Builds the stem (3x3 conv + stride-2 downsample), the five residual
    stages from DarkNet_cfg[53], and the four inter-stage downsamples.
    forward() returns the outputs of the last three stages, deepest first.
    """

    def __init__(self):
        super(DarkNet53_conv_body, self).__init__()
        self.stage = DarkNet_cfg[53]
        self.stages = self.stage[0:5]
        # Stem: full-resolution 3->32 conv, then downsample to 64 channels.
        self.conv0 = ConvBNLayer(ch_in=3, ch_out=32,
                                 kernel_size=3, stride=1, padding=1)
        self.downsample0 = DownSample(ch_in=32, ch_out=64)
        self.darknet53_conv_block_list = []
        self.downsample_list = []
        # Stage idx consumes 32 * 2**(idx+1) channels and bottlenecks
        # to 32 * 2**idx (each BasicBlock expands back to the input width).
        for idx, block_count in enumerate(self.stages):
            stage_block = self.add_sublayer(
                "stage_%d" % (idx),
                LayerWarp(ch_in=32 * (2 ** (idx + 1)),
                          ch_out=32 * (2 ** idx),
                          count=block_count))
            self.darknet53_conv_block_list.append(stage_block)
        # One downsample between consecutive stages (none after the last).
        for idx in range(len(self.stages) - 1):
            ds = self.add_sublayer(
                name="stage_%d_downsample" % idx,
                sublayer=DownSample(ch_in=32 * (2 ** (idx + 1)),
                                    ch_out=32 * (2 ** (idx + 2))))
            self.downsample_list.append(ds)

    def forward(self, inputs):
        out = self.downsample0(self.conv0(inputs))
        blocks = []
        for idx, stage_block in enumerate(self.darknet53_conv_block_list):
            out = stage_block(out)
            blocks.append(out)
            if idx < len(self.stages) - 1:
                out = self.downsample_list[idx](out)
        # Last three stage outputs in reverse order (deepest feature first).
        return blocks[-1:-4:-1]