Build a general-purpose neural network model with PyTorch in which the number of convolutional and fully connected layers is user-configurable. The code is as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
class FlexibleNetwork(nn.Module):
    """A configurable CNN: a stack of conv + max-pool blocks followed by a
    stack of fully connected layers.

    Args:
        num_conv_layers: number of Conv2d layers; should equal
            ``len(conv_kernel_sizes)``.
        num_fc_layers: number of Linear layers appended after flattening.
        conv_kernel_sizes: kernel size for each conv layer, in order.
        pool_sizes: pooling window for each MaxPool2d layer. ``forward()``
            pairs pool ``i`` with conv ``i`` via ``zip``, so this should have
            the same length as the conv stack (extras are silently unused).
        input_size: ``(channels, height, width)`` of a single input sample.
        conv_out_channels: output channels for every conv layer
            (default 64, matching the previously hard-coded value).
        fc_out_features: output size of every FC layer
            (default 256, matching the previously hard-coded value).
    """

    def __init__(self, num_conv_layers, num_fc_layers, conv_kernel_sizes,
                 pool_sizes, input_size, conv_out_channels=64,
                 fc_out_features=256):
        super().__init__()
        self.input_size = input_size  # (C, H, W) of one sample

        # Convolution stack: the channel count is the same for every layer,
        # only the kernel size varies per layer.
        self.conv_layers = nn.ModuleList()
        in_channels = input_size[0]  # input channels come from the data shape
        for i in range(num_conv_layers):
            self.conv_layers.append(
                nn.Conv2d(in_channels, conv_out_channels, conv_kernel_sizes[i]))
            in_channels = conv_out_channels

        # One pooling layer per entry in pool_sizes.
        self.pool_layers = nn.ModuleList(
            nn.MaxPool2d(pool_size) for pool_size in pool_sizes)

        # Fully connected stack; the first layer's input width is derived by
        # tracing a dummy tensor through the conv/pool stack (see
        # calculate_fc_input_size), so it adapts to any input_size.
        self.fc_layers = nn.ModuleList()
        fc_input_size = self.calculate_fc_input_size()
        for _ in range(num_fc_layers):
            self.fc_layers.append(nn.Linear(fc_input_size, fc_out_features))
            fc_input_size = fc_out_features

    def forward(self, x):
        """Apply conv+pool blocks, flatten, then the FC stack.

        NOTE(review): ReLU is applied after *every* FC layer, including the
        last one — fine for a feature extractor, but a classification or
        regression head would normally omit the final activation.
        """
        for conv_layer, pool_layer in zip(self.conv_layers, self.pool_layers):
            x = pool_layer(F.relu(conv_layer(x)))
        x = x.view(x.size(0), -1)  # flatten everything except the batch dim
        for fc_layer in self.fc_layers:
            x = F.relu(fc_layer(x))
        return x

    def calculate_fc_input_size(self):
        """Return the flattened feature count produced by the conv/pool stack.

        Pushes one dummy sample through the stack; no_grad avoids building an
        unnecessary autograd graph for this shape probe.
        """
        with torch.no_grad():
            x = torch.randn(1, *self.input_size)
            for conv_layer, pool_layer in zip(self.conv_layers,
                                              self.pool_layers):
                x = pool_layer(F.relu(conv_layer(x)))
        return x.view(1, -1).size(1)

    def summary(self):
        """Print a layer-by-layer summary via torchsummary."""
        summary(self, input_size=self.input_size)
def main():
    """Build an example FlexibleNetwork and print its structure.

    Wrapped in a main() + __main__ guard so importing this module does not
    construct the model or call torchsummary as a side effect.
    """
    num_conv_layers = 2
    num_fc_layers = 2
    conv_kernel_sizes = [3, 3]
    pool_sizes = [2, 2]
    input_size = (3, 224, 224)  # (channels, height, width) of the input
    model = FlexibleNetwork(num_conv_layers, num_fc_layers,
                            conv_kernel_sizes, pool_sizes, input_size)
    # Print the model structure (requires the torchsummary package).
    model.summary()


if __name__ == "__main__":
    main()