PyTorch 手动实现 FaceNet 网络结构

Facenet NN2
网络结构图
（原文此处为 FaceNet NN2 网络结构图插图，网页提取时图片丢失，可参阅 FaceNet 论文中的 NN2 结构表）
网络结构实现

import torch
import torch.nn as nn
import numpy as np
from torchsummary import summary
from padding_same_conv import Conv2d
#https://blog.csdn.net/Lauyeed/article/details/79514839

def l1_norm(input, axit=1):
    """Divide *input* by its L1 norm taken along dimension *axit*.

    The norm is computed with ``keepdim=True`` so the division broadcasts
    across the normalized dimension.
    """
    denom = torch.norm(input, 1, axit, True)
    return torch.div(input, denom)

def l2_norm(input, axit=1):
    """Divide *input* by its L2 (Euclidean) norm taken along dimension *axit*.

    The norm is computed with ``keepdim=True`` so the division broadcasts
    across the normalized dimension; rows come out with unit length.
    """
    denom = torch.norm(input, 2, axit, True)
    return torch.div(input, denom)


class InecptionBlock(nn.Module):
    """GoogLeNet/FaceNet-NN2 style inception block with four parallel branches.

    The branch outputs are concatenated along the channel dimension:
      * direct 1x1 conv                     (only when ``b1 > 0``)
      * 1x1 reduce -> 3x3 conv              (3x3 uses stride ``st``)
      * 1x1 reduce -> 5x5 conv              (5x5 uses stride ``st``)
      * pool branch: max pool when ``mp == 1``, else an L2 normalization,
        optionally followed by a 1x1 projection when ``M1 > 0``.

    Args:
        input_channels: channels of the incoming feature map.
        st: stride for the 3x3 / 5x5 convs and the max pool (1 or 2).
        b1: output channels of the direct 1x1 branch; 0 disables it.
        b3_1: reduce channels before the 3x3 conv.
        b3_3: output channels of the 3x3 conv.
        b5_1: reduce channels before the 5x5 conv.
        b5_5: output channels of the 5x5 conv.
        M1: output channels of the 1x1 projection after pooling; 0 disables it.
        mps: max-pool kernel size (used only when ``mp == 1``).
        mp: 1 -> max-pool the fourth branch; otherwise L2-normalize it.
    """
    expansion = 1

    def __init__(self, input_channels, st, b1, b3_1, b3_3, b5_1, b5_5, M1, mps, mp):
        super().__init__()
        self.b1 = b1
        self.M1 = M1
        self.mp = mp

        if b1 > 0:
            self.branch1 = Conv2d(input_channels, b1, kernel_size=1, stride=1, bias=False)

        self.branch3_1 = Conv2d(input_channels, b3_1, kernel_size=1, stride=1, bias=False)
        self.branch3_3 = Conv2d(b3_1, b3_3, kernel_size=3, stride=st, bias=False)

        self.branch5_1 = Conv2d(input_channels, b5_1, kernel_size=1, stride=1, bias=False)
        self.branch5_5 = Conv2d(b5_1, b5_5, kernel_size=5, stride=st, bias=False)

        if mp == 1:
            self.Maxpool = nn.MaxPool2d(kernel_size=mps, stride=st, padding=(1, 1))

        if M1 > 0:
            self.Max1 = Conv2d(input_channels, M1, kernel_size=1, stride=1, bias=False)

    def forward(self, x):
        # x -> 1x1 (same padding)
        if self.b1 > 0:
            branch1 = self.branch1(x)

        # x -> 1x1 -> 3x3 (same padding)
        branch3_3 = self.branch3_3(self.branch3_1(x))

        # x -> 1x1 -> 5x5 (same padding)
        branch5_5 = self.branch5_5(self.branch5_1(x))

        # x -> pool: max pool, or L2 normalization when mp != 1
        if self.mp == 1:
            branch = self.Maxpool(x)
        else:
            branch = l2_norm(x)

        # BUG FIX: the pool-projection conv ``Max1`` is created when M1 > 0,
        # so its use must be gated on M1 as well. The original gated on
        # ``self.b1 > 0``, which only avoided an AttributeError because every
        # caller in this file happened to pass b1 > 0 iff M1 > 0.
        if self.M1 > 0:
            outputs = [branch3_3, branch5_5, self.Max1(branch)]
        else:
            outputs = [branch3_3, branch5_5, branch]
        if self.b1 > 0:
            outputs.insert(0, branch1)

        return torch.cat(outputs, 1)

class FaceNet(nn.Module):
    """FaceNet NN2 (GoogLeNet-style inception) embedding network.

    Maps a batch of (N, 3, 224, 224) images to L2-normalized
    128-dimensional embeddings of shape (N, 128).
    """

    def __init__(self):
        super().__init__()
        # Stem: 7x7/2 conv -> max pool -> norm -> 1x1 -> 3x3 -> norm -> max pool
        self.con1 = Conv2d(3, 64, 7, stride=2)
        self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=(1, 1))
        self.ipt2_1 = Conv2d(64, 64, kernel_size=1, stride=1)
        self.ipt2_2 = Conv2d(64, 192, kernel_size=3, stride=1)
        self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=(1, 1))

        # Inception stacks.
        # Args: in_ch, stride, b1, b3_1, b3_3, b5_1, b5_5, M1, mps, mp
        self.in3a = InecptionBlock(192, 1, 64, 96, 128, 16, 32, 32, 3, 1)
        self.in3b = InecptionBlock(256, 1, 64, 96, 128, 32, 64, 64, 0, 0)
        self.in3c = InecptionBlock(320, 2, 0, 128, 256, 32, 64, 0, 3, 1)   # stride-2 reduction
        self.in4a = InecptionBlock(640, 1, 256, 96, 192, 32, 64, 128, 0, 0)
        self.in4b = InecptionBlock(640, 1, 224, 112, 224, 32, 64, 128, 0, 0)
        self.in4c = InecptionBlock(640, 1, 192, 128, 256, 32, 64, 128, 0, 0)
        self.in4d = InecptionBlock(640, 1, 160, 144, 288, 32, 64, 128, 0, 0)
        self.in4e = InecptionBlock(640, 2, 0, 160, 256, 64, 128, 0, 3, 1)  # stride-2 reduction
        self.in5a = InecptionBlock(1024, 1, 384, 192, 384, 48, 128, 128, 0, 0)
        self.in5b = InecptionBlock(1024, 1, 384, 192, 384, 48, 128, 128, 3, 1)

        # BUG FIX: NN2 ends with *average* pooling over the final 7x7 map;
        # the original built nn.MaxPool2d here despite its own "avg pool"
        # comment and the attribute name.
        self.avg_pool = nn.AvgPool2d(kernel_size=7, stride=1)
        # Final projection to the 128-d embedding.
        self.fc = nn.Linear(1024, 128)

    def forward(self, input):
        out = self.con1(input)
        out = self.max_pool1(out)
        out = l1_norm(out)
        out = self.ipt2_1(out)
        out = self.ipt2_2(out)
        out = l1_norm(out)
        out = self.max_pool2(out)
        out = self.in3a(out)
        out = self.in3b(out)
        out = self.in3c(out)
        out = self.in4a(out)
        out = self.in4b(out)
        out = self.in4c(out)
        out = self.in4d(out)
        out = self.in4e(out)
        out = self.in5a(out)
        out = self.in5b(out)
        out = self.avg_pool(out)       # (N, 1024, 1, 1)
        out = out.view(-1, 1024)       # flatten to (N, 1024)
        out = self.fc(out)
        # Final embedding is constrained to the unit hypersphere.
        return l2_norm(out)

if __name__ == '__main__':
    # Smoke test: build the model and push one random 224x224 RGB image through.
    net = FaceNet()
    print(net)

    dummy = torch.randn((1, 3, 224, 224))
    embedding = net(dummy)
    print('----------output shape----------')
    print(embedding.shape)

打印网络结构

FaceNet(
(con1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2))
(max_pool1): MaxPool2d(kernel_size=3, stride=2, padding=(1, 1), dilation=1, ceil_mode=False)
(ipt2_1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))
(ipt2_2): Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1))
(max_pool2): MaxPool2d(kernel_size=3, stride=2, padding=(1, 1), dilation=1, ceil_mode=False)
(in3a): InecptionBlock(
(branch1): Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(192, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(192, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Maxpool): MaxPool2d(kernel_size=3, stride=1, padding=(1, 1), dilation=1, ceil_mode=False)
(Max1): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in3b): InecptionBlock(
(branch1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(256, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in3c): InecptionBlock(
(branch3_1): Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), bias=False)
(branch5_1): Conv2d(320, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(2, 2), bias=False)
(Maxpool): MaxPool2d(kernel_size=3, stride=2, padding=(1, 1), dilation=1, ceil_mode=False)
)
(in4a): InecptionBlock(
(branch1): Conv2d(640, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(640, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(96, 192, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(640, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in4b): InecptionBlock(
(branch1): Conv2d(640, 224, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(640, 112, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(112, 224, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(640, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in4c): InecptionBlock(
(branch1): Conv2d(640, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(640, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in4d): InecptionBlock(
(branch1): Conv2d(640, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(640, 144, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(144, 288, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(640, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in4e): InecptionBlock(
(branch3_1): Conv2d(640, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(160, 256, kernel_size=(3, 3), stride=(2, 2), bias=False)
(branch5_1): Conv2d(640, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(64, 128, kernel_size=(5, 5), stride=(2, 2), bias=False)
(Maxpool): MaxPool2d(kernel_size=3, stride=2, padding=(1, 1), dilation=1, ceil_mode=False)
)
(in5a): InecptionBlock(
(branch1): Conv2d(1024, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(1024, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Max1): Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(in5b): InecptionBlock(
(branch1): Conv2d(1024, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_1): Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch3_3): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), bias=False)
(branch5_1): Conv2d(1024, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(branch5_5): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), bias=False)
(Maxpool): MaxPool2d(kernel_size=3, stride=1, padding=(1, 1), dilation=1, ceil_mode=False)
(Max1): Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
(avg_pool): MaxPool2d(kernel_size=7, stride=1, padding=0, dilation=1, ceil_mode=False)
(fc): Linear(in_features=1024, out_features=128, bias=True)
)

参考:GitHub 实现的pytorch padding = same 代码
https://github.com/Oldpan/Faceswap-Deepfake-Pytorch/blob/master/models.py

  • 2
    点赞
  • 10
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值