PyTorch Implementations of Classic Network Architectures: FC + LeNet + VGG + GoogLeNet + ResNet

This post collects PyTorch implementations of several classic network architectures, each followed by a quick smoke test.


1.FC

import torch
import torch.nn as nn
import torch.nn.functional as F


# Fully connected neural network
class FCNet(nn.Module):
    """
    A three-layer network (input -> 1024-unit hidden layer -> output).
    """

    def __init__(self, input_shape, out_dim):
        super(FCNet, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(input_shape, 1024), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(1024, out_dim), nn.ReLU(True))   # note: the ReLU on the output keeps the logits non-negative

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)

        return x
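
A quick smoke test for FCNet, in the same spirit as the tests further below, assuming each image is flattened to a vector first (here a 28*28 grayscale image, i.e. 784 inputs):

net = FCNet(784, 10)

input = torch.randn(1, 28 * 28)   # flatten each image before feeding it to the fully connected net

out = net(input)

print(out.data)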

2.LeNet5

class LeNet5(nn.Module):
    # The input size is fixed: 32*32 grayscale images (input_shape is kept only for interface consistency)
    def __init__(self, input_shape, out_dim):
        super(LeNet5, self).__init__()

        self.conv1 = nn.Sequential(nn.Conv2d(1, 6, 5, padding=2),   # conv layer 1: 1 input channel, 6 filters of size 5
                                   nn.ReLU(True),
                                   nn.MaxPool2d(kernel_size=2)
                                   )

        self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5),             # conv layer 2: 6 input channels, 16 filters of size 5
                                   nn.ReLU(True),
                                   nn.MaxPool2d(2, 2)
                                   )

        # 32 -> 32 (padding=2) -> 16 (pool) -> 12 (5*5 conv, no padding) -> 6 (pool), hence 16*6*6 features
        self.fc = nn.Sequential(nn.Linear(16*6*6, 120),
                                nn.ReLU(True),
                                nn.Linear(120, 84),
                                nn.ReLU(True),
                                nn.Linear(84, out_dim)
                                )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)

        x = x.view(-1, self.num_flat_features(x))

        x = self.fc(x)

        return x

    def num_flat_features(self, x):
        # Number of features when x is flattened to one dimension (excluding the batch dimension)
        size = x.size()[1:]

        num_features = 1
        for s in size:
            num_features *= s

        return num_features

Test:

net = LeNet5((1, 32, 32), 10)

input = torch.randn(1, 1, 32, 32)

out = net(input)   # wrapping in torch.autograd.Variable is no longer needed since PyTorch 0.4

print(out.data)

Output:

tensor([[ 0.1051, -0.0926, -0.0695,  0.0994, -0.0164,  0.1064,  0.0065, -0.0811,
          0.0490, -0.0519]])
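
The 16*6*6 in the first fully connected layer follows directly from the conv stack; a short shape trace (reusing the net defined above) makes this explicit:

x = torch.randn(1, 1, 32, 32)
x = net.conv1(x)   # padding=2 keeps 32*32, then 2*2 pooling -> (1, 6, 16, 16)
x = net.conv2(x)   # 5*5 conv without padding: 16 -> 12, then 2*2 pooling -> (1, 16, 6, 6)
print(x.size())    # torch.Size([1, 16, 6, 6]), i.e. 16*6*6 = 576 features after flattening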

3.VGG16

class VGG16(nn.Module):
    # The input size is not fixed; any multiple of 16 works (the four pooling stages shrink the spatial dims by 16x)
    def __init__(self, input_shape, out_dim):
        super(VGG16, self).__init__()

        self.conv1 = nn.Sequential(nn.Conv2d(3, 64, 3),   # 3 input channels, 64 filters of size 3
                                   nn.ReLU(),
                                   nn.Conv2d(64, 64, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.MaxPool2d((2, 2), padding=(1, 1))
                                   )

        self.conv2 = nn.Sequential(nn.Conv2d(64, 128, 3),
                                   nn.ReLU(),
                                   nn.Conv2d(128, 128, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.MaxPool2d((2, 2), padding=(1, 1))
                                   )

        self.conv3 = nn.Sequential(nn.Conv2d(128, 256, 3),
                                   nn.ReLU(),
                                   nn.Conv2d(256, 256, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.Conv2d(256, 256, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.MaxPool2d((2, 2), padding=(1, 1))
                                   )

        self.conv4 = nn.Sequential(nn.Conv2d(256, 512, 3),
                                   nn.ReLU(),
                                   nn.Conv2d(512, 512, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.Conv2d(512, 512, 3, padding=(1, 1)),
                                   nn.ReLU(),
                                   nn.MaxPool2d((2, 2), padding=(1, 1))
                                   )

        # Fully connected classifier; the input size follows from the 16x spatial downsampling
        self.fc = nn.Sequential(nn.Linear(512 * (input_shape[1] // 16) * (input_shape[2] // 16), 4096),
                                nn.ReLU(True),
                                nn.Linear(4096, 4096),
                                nn.ReLU(True),
                                nn.Linear(4096, out_dim)
                                )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        print(x.size())              # debug: feature map size before flattening

        x = x.view(x.size(0), -1)    # flatten all feature dimensions

        return self.fc(x)

Test:

# VGG test
net = VGG16((3, 128, 128), 10)

input = torch.randn(1, 3, 128, 128)

out = net(input)

print(out.data)

Output:

torch.Size([1, 512, 8, 8])
tensor([[-0.0004, -0.0099, -0.0105,  0.0113, -0.0046, -0.0140,  0.0027, -0.0021,
      -0.0140,  0.0010]])
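
The in_features of the first fully connected layer just encodes that 16x downsampling; a small helper (hypothetical, for illustration only) shows the arithmetic:

def vgg_fc_in_features(input_shape, channels=512, downsample=16):
    # input_shape is (C, H, W); four 2*2 pooling stages divide H and W by 16
    _, h, w = input_shape
    return channels * (h // downsample) * (w // downsample)

print(vgg_fc_in_features((3, 128, 128)))   # 32768 = 512 * 8 * 8, matching the feature map printed above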

4.GoogleNet

class Inception(nn.Module):
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # branch 1: 1x1 convolution
        self.conv1 = nn.Sequential(nn.Conv2d(in_planes, n1x1, kernel_size=1),
                                   nn.BatchNorm2d(n1x1),
                                   nn.ReLU(True)
                                   )
        # branch 2: 1x1 conv followed by a 3x3 conv
        self.conv2 = nn.Sequential(nn.Conv2d(in_planes, n3x3red, kernel_size=1),
                                   nn.BatchNorm2d(n3x3red),
                                   nn.ReLU(True),
                                   nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
                                   nn.BatchNorm2d(n3x3),
                                   nn.ReLU(True)
                                   )

        # branch 3: 1x1 conv, then a 5x5 receptive field built from two stacked 3x3 convs
        self.conv3 = nn.Sequential(nn.Conv2d(in_planes, n5x5red, kernel_size=1),
                                   nn.BatchNorm2d(n5x5red),
                                   nn.ReLU(True),
                                   nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
                                   nn.BatchNorm2d(n5x5),
                                   nn.ReLU(True),
                                   nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
                                   nn.BatchNorm2d(n5x5),
                                   nn.ReLU(True)
                                   )

        # branch 4: 3x3 max pooling followed by a 1x1 conv
        self.conv4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1),
                                   nn.Conv2d(in_planes, pool_planes, kernel_size=1),
                                   nn.BatchNorm2d(pool_planes),
                                   nn.ReLU(True)
                                   )


    def forward(self, x):
        out1 = self.conv1(x)
        out2 = self.conv2(x)
        out3 = self.conv3(x)
        out4 = self.conv4(x)

        return torch.cat([out1, out2, out3, out4], 1)

# GoogLeNet; the stem here is simplified to a single 3*3 convolution, suitable for 32*32 inputs
class GoogleNet(nn.Module):
    def __init__(self, out_dim):
        super(GoogleNet, self).__init__()

        # Stem: 3 input channels -> 192 feature maps, spatial size unchanged
        self.pre_layer = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1),
                                       nn.BatchNorm2d(192),
                                       nn.ReLU(True)
                                       )

        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)   # global pooling for the 8*8 feature maps produced by 32*32 inputs
        self.linear = nn.Linear(1024, out_dim)

    def forward(self, x):
        x = self.pre_layer(x)
        x = self.a3(x)
        x = self.b3(x)
        x = self.maxpool(x)
        x = self.a4(x)
        x = self.b4(x)
        x = self.c4(x)
        x = self.d4(x)
        x = self.e4(x)
        x = self.maxpool(x)
        x = self.a5(x)
        x = self.b5(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)

        return x

Test:

net = GoogleNet(10)

input = torch.randn(1, 3, 32, 32)

out = net(input)

print(out.data)

Output:

tensor([[-0.0747, -0.0275,  0.0790, -0.0833, -0.0860, -0.1031, -0.0694,  0.3045,
         -0.0731, -0.2024]])
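
Each Inception block simply concatenates its four branches along the channel dimension, so its output has n1x1 + n3x3 + n5x5 + pool_planes channels while the spatial size is preserved. A quick check on the a3 configuration, assuming a 32*32 feature map:

block = Inception(192, 64, 96, 128, 16, 32, 32)
y = block(torch.randn(1, 192, 32, 32))
print(y.size())   # torch.Size([1, 256, 32, 32]) -> 64 + 128 + 32 + 32 = 256 channels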

5.ResNet

class ResidualBlock(nn.Module):
    def __init__(self, in_channel, out_channel, stride=1, shortcut=None):
        super(ResidualBlock, self).__init__()

        # Main branch: two 3*3 convolutions with batch norm
        self.left = nn.Sequential(nn.Conv2d(in_channel, out_channel, 3, stride, 1, bias=False),
                                  nn.BatchNorm2d(out_channel),
                                  nn.ReLU(inplace=True),
                                  nn.Conv2d(out_channel, out_channel, 3, 1, 1, bias=False),
                                  nn.BatchNorm2d(out_channel)
                                  )
        # Shortcut branch; identity mapping when None
        self.right = shortcut

    def forward(self, x):
        out = self.left(x)
        residual = x if self.right is None else self.right(x)
        out += residual

        return F.relu(out)    # functional ReLU; nn.ReLU is a Module and would have to be instantiated before it could be called here


class ResNet(nn.Module):
    def __init__(self, out_dim):
        super(ResNet, self).__init__()

        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1)
        )

        # Four stages of residual blocks (3, 4, 6, 3 blocks per stage)
        self.layer1 = self._make_layer(64, 128, 3)
        self.layer2 = self._make_layer(128, 256, 4, stride=2)
        self.layer3 = self._make_layer(256, 512, 6, stride=2)
        self.layer4 = self._make_layer(512, 512, 3, stride=2)

        self.fc = nn.Linear(512, out_dim)

    def _make_layer(self, in_channel, out_channel, block_num, stride=1):
        # The first block of each stage uses a 1*1 conv shortcut to match the new channel count and stride;
        # the remaining blocks use identity shortcuts.
        shortcut = nn.Sequential(nn.Conv2d(in_channel, out_channel, 1, stride, bias=False),
                                 nn.BatchNorm2d(out_channel)
                                 )

        layers = []
        layers.append(ResidualBlock(in_channel, out_channel, stride, shortcut))

        for i in range(1, block_num):
            layers.append(ResidualBlock(out_channel, out_channel))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.pre_layers(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = F.avg_pool2d(x, 7)   # global average pooling over the 7*7 feature maps produced by 224*224 inputs
        x = x.view(x.size(0), -1)
        return self.fc(x)

Test:

net = ResNet(10)

input = torch.randn(1, 3, 224, 224)

out = net(input)
print(out.data)

Output:

tensor([[ 0.1972, -0.3698, -0.1574,  0.5450,  0.4381, -0.4979,  0.3620, -0.0750,
         -0.9074, -0.0504]])
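
The fixed F.avg_pool2d(x, 7) ties this ResNet to 224*224 inputs (the last feature map is 7*7). One alternative, if variable input sizes are needed, is adaptive average pooling; a minimal sketch on a dummy feature map:

feat = torch.randn(1, 512, 7, 7)               # feature map right before pooling
pooled = F.adaptive_avg_pool2d(feat, 1)        # always pools down to (N, 512, 1, 1), whatever the spatial size
print(pooled.view(pooled.size(0), -1).size())  # torch.Size([1, 512]), ready for the final nn.Linear(512, out_dim)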