LeNet architecture:
LeNet.py
import paddle
import numpy as np
from paddle.nn import Conv2D, MaxPool2D, Linear
import paddle.nn.functional as F

# define LeNet
class LeNet(paddle.nn.Layer):
    def __init__(self, num_classes=1):
        super(LeNet, self).__init__()
        # first conv + pool
        self.conv1 = Conv2D(in_channels=1, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        # second conv + pool
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        # third conv
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        # output of conv3: c,h,w = 120,1,1
        # first linear
        self.fc1 = Linear(in_features=120, out_features=64)
        # second linear
        self.fc2 = Linear(in_features=64, out_features=num_classes)

    def forward(self, inputs):
        """Forward pass."""
        x = self.conv1(inputs)   # convolution
        x = F.sigmoid(x)         # activation
        x = self.max_pool1(x)    # pooling
        x = self.conv2(x)        # convolution
        x = F.sigmoid(x)         # activation
        x = self.max_pool2(x)    # pooling
        x = self.conv3(x)        # convolution
        x = paddle.reshape(x, [x.shape[0], -1])  # [B,C,H,W] -> [B,C*H*W]
        x = self.fc1(x)
        x = F.sigmoid(x)
        x = self.fc2(x)
        return x
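Before running any code, the feature-map sizes implied by these layer settings can be checked by hand: for a convolution or pooling layer with no padding, output size = (input size - kernel size) / stride + 1. A minimal sketch of that arithmetic for a 28x28 input (the out_size helper is only for illustration, it is not part of LeNet.py):

def out_size(in_size, kernel, stride=1):
    # output size of a conv/pool layer with no padding
    return (in_size - kernel) // stride + 1

h = 28
h = out_size(h, kernel=5)             # conv1:     28 -> 24
h = out_size(h, kernel=2, stride=2)   # max_pool1: 24 -> 12
h = out_size(h, kernel=5)             # conv2:     12 -> 8
h = out_size(h, kernel=2, stride=2)   # max_pool2:  8 -> 4
h = out_size(h, kernel=4)             # conv3:      4 -> 1
print(h)  # 1

This is why conv3 uses kernel_size=4: it reduces the 4x4 map to 1x1, so the flattened feature vector has exactly 120 elements, matching fc1's in_features=120.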
Construct some data and inspect how the shape changes through each layer:
import numpy as np
import paddle
from LeNet import LeNet

x = np.random.randn(3, 1, 28, 28)  # create random data, shape: [N, 1, H, W]
x = x.astype('float32')
model = LeNet(num_classes=10)
# print(model.sublayers())
x = paddle.to_tensor(x)  # numpy to tensor
for item in model.sublayers():
    # item is a sublayer of LeNet; inspect its output shape
    try:
        x = item(x)
    except Exception:
        # the Linear layers cannot take a 4-D input, so flatten first and retry
        x = paddle.reshape(x, [x.shape[0], -1])
        x = item(x)
    if len(item.parameters()) == 2:
        # item.parameters()[0] is the weight w, item.parameters()[1] is the bias b
        print("*" * 50)
        print(item.full_name())
        print(x.shape)
        print(item.parameters()[0].shape, item.parameters()[1].shape)
    else:
        print("*" * 50)
        print("this is a max_pooling layer, no parameters.")
        print(item.full_name(), x.shape)
Output:
**************************************************
conv2d_0
[3, 6, 24, 24]
[6, 1, 5, 5] [6]
**************************************************
this is a max_pooling layer, no parameters.
max_pool2d_0 [3, 6, 12, 12]
**************************************************
conv2d_1
[3, 16, 8, 8]
[16, 6, 5, 5] [16]
**************************************************
this is a max_pooling layer, no parameters.
max_pool2d_1 [3, 16, 4, 4]
**************************************************
conv2d_2
[3, 120, 1, 1]
[120, 16, 4, 4] [120]
**************************************************
linear_0
[3, 64]
[120, 64] [64]
**************************************************
linear_1
[3, 10]
[64, 10] [10]
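As an alternative to looping over model.sublayers(), paddle.summary prints a similar per-layer table of output shapes and parameter counts (a minimal sketch, assuming Paddle 2.x, where paddle.summary is available):

import paddle
from LeNet import LeNet

model = LeNet(num_classes=10)
# prints each sublayer's output shape and parameter count
# for a batch of three 1x28x28 inputs, matching the random data above
paddle.summary(model, input_size=(3, 1, 28, 28))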