pytorch 图像分类(3)

1.DenseNet

在一个 block 内,每一层操作的输入都是前面所有层输出的拼接(concatenate)

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
class BN_Conv2d(nn.Module):
    """Conv2d followed by BatchNorm2d, with ReLU applied in forward()."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv2d, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                         stride=stride, padding=padding, dilation=dilation,
                         groups=groups, bias=bias)
        norm = nn.BatchNorm2d(out_channels)
        self.seq = nn.Sequential(conv, norm)

    def forward(self, x):
        # Activation lives outside the Sequential, so self.seq yields the
        # pre-activation (conv + BN) output.
        pre_act = self.seq(x)
        return F.relu(pre_act)

class DenseBlock(nn.Module):
    """DenseNet dense block: each inner layer receives the concatenation of the
    block input and all previous layers' outputs.

    Args:
        in_channels: channels of the block input (k0).
        num_layers: number of bottleneck (1x1 -> 3x3) layers.
        growth_rate: channels each layer adds to the running feature map (k).

    Output channels: in_channels + num_layers * growth_rate.
    """

    def __init__(self, in_channels, num_layers, growth_rate):
        super(DenseBlock, self).__init__()
        self.num_layers = num_layers
        self.k0 = in_channels
        self.k = growth_rate
        self.layers = self._make_layers()

    def _make_layers(self):
        # BUG FIX: layers were kept in a plain Python list, so their parameters
        # were invisible to .parameters(), .to(device), state_dict, etc.
        # nn.ModuleList registers them as proper submodules.
        layer_list = []
        for i in range(self.num_layers):
            layer_list.append(nn.Sequential(
                # 1x1 bottleneck to 4k channels, then 3x3 producing k channels.
                BN_Conv2d(self.k0 + i * self.k, 4 * self.k, 1, 1, 0),
                BN_Conv2d(4 * self.k, self.k, 3, 1, 1)
            ))
        return nn.ModuleList(layer_list)

    def forward(self, x):
        feature = self.layers[0](x)
        out = torch.cat((x, feature), 1)
        for i in range(1, len(self.layers)):
            feature = self.layers[i](out)
            # New features are prepended, preserving the original channel order.
            out = torch.cat((feature, out), 1)
        return out


class DenseNet(nn.Module):
    """DenseNet classifier: 7x7/2 stem -> dense blocks with compression
    transitions -> global pooling -> fully-connected head.

    Args:
        layers: dense-layer count per block, e.g. [6, 12, 24, 16] for DenseNet-121.
        k: growth rate.
        theta: transition compression factor in (0, 1].
        num_classes: number of output classes.

    BUG FIXES vs. the original:
      * __init__ called self._make_blocks but the method was defined as
        __make_blocks (name-mangled), and that method called
        self.__make_transition while the method is _make_transition -- both
        raised AttributeError; names are now consistent.
      * the parameter was `num_class` while every factory in this file passes
        `num_classes=` (the original crashed at construction anyway).
      * F.softmax is given an explicit dim=1 (implicit dim is deprecated).
    """

    def __init__(self, layers, k, theta, num_classes):
        super(DenseNet, self).__init__()
        self.layers = layers
        self.k = k
        self.theta = theta
        # Stem: 3 -> 2k channels, 7x7 conv with stride 2.
        self.conv = BN_Conv2d(3, 2 * k, 7, 2, 3)
        self.blocks, patches = self._make_blocks(2 * k)
        self.fc = nn.Linear(patches, num_classes)

    def _make_transition(self, in_chls):
        """1x1 compression (by theta) followed by 2x2 average pooling."""
        out_chls = int(self.theta * in_chls)
        return nn.Sequential(
            BN_Conv2d(in_chls, out_chls, 1, 1, 0),
            nn.AvgPool2d(2)
        ), out_chls

    def _make_blocks(self, k0):
        """Build alternating DenseBlock / transition stages.

        :param k0: channels entering the first block
        :return: (nn.Sequential of stages, channels of the final feature map)
        """
        layers_list = []
        patches = 0
        for i in range(len(self.layers)):
            layers_list.append(DenseBlock(k0, self.layers[i], self.k))
            patches = k0 + self.layers[i] * self.k  # output channels of this block
            if i != len(self.layers) - 1:
                transition, k0 = self._make_transition(patches)
                layers_list.append(transition)
        return nn.Sequential(*layers_list), patches

    def forward(self, x):
        out = self.conv(x)
        out = F.max_pool2d(out, 3, 2, 1)
        out = self.blocks(out)
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        # Softmax over the class dimension.
        out = F.softmax(self.fc(out), dim=1)
        return out

def _densenet(block_layers, num_classes):
    # Shared builder: every standard variant uses growth rate k=32 and
    # compression theta=0.5; only the per-stage layer counts differ.
    return DenseNet(block_layers, k=32, theta=0.5, num_classes=num_classes)


def densenet_121(num_classes=1000):
    """DenseNet-121 (stages of 6/12/24/16 dense layers)."""
    return _densenet([6, 12, 24, 16], num_classes)


def densenet_169(num_classes=1000):
    """DenseNet-169 (stages of 6/12/32/32 dense layers)."""
    return _densenet([6, 12, 32, 32], num_classes)


def densenet_201(num_classes=1000):
    """DenseNet-201 (stages of 6/12/48/32 dense layers)."""
    return _densenet([6, 12, 48, 32], num_classes)


def densenet_264(num_classes=1000):
    """DenseNet-264 (stages of 6/12/64/48 dense layers)."""
    return _densenet([6, 12, 64, 48], num_classes)


def test():
    """Smoke test: build DenseNet-264, print a layer summary, then check a
    forward pass on a dummy batch runs end to end."""
    model = densenet_264()
    summary(model, (3, 224, 224))
    dummy = torch.randn((2, 3, 224, 224))
    logits = model(dummy)
    print(logits.shape)


test()

2.SENet

首先把特征图压缩成一个通道向量,然后通过降采样和上采样过滤出通道的权重,(有点像opencv里的开运算:先腐蚀再膨胀,用来消除小物体)

import  torch.nn as nn
import  torch
import torch.nn.functional as F

class SE(nn.Module):
    """Squeeze-and-Excitation gate.

    Global-average-pools the input to a per-channel descriptor, compresses it
    by `ratio` with a 1x1 conv, re-expands it, and returns per-channel weights
    in (0, 1) of shape (N, C, 1, 1) to be multiplied onto the feature map.

    Args:
        in_chnls: number of input channels C.
        ratio: channel reduction factor of the bottleneck.
    """

    def __init__(self, in_chnls, ratio):
        super(SE, self).__init__()
        self.squeeze = nn.AdaptiveAvgPool2d((1, 1))
        self.compress = nn.Conv2d(in_chnls, in_chnls // ratio, 1, 1, 0)
        self.excitation = nn.Conv2d(in_chnls // ratio, in_chnls, 1, 1, 0)

    def forward(self, x):
        out = self.squeeze(x)
        out = self.compress(out)
        out = F.relu(out)
        out = self.excitation(out)
        # BUG FIX: F.sigmoid is deprecated (and removed in newer PyTorch);
        # torch.sigmoid is the supported equivalent.
        return torch.sigmoid(out)


3.DarkNet

和 ResNet 差不多,不过 ResNet 的 bottleneck 是三步:先 1x1,然后 3x3,最后 1x1;DarkNet 只有两步:1x1 然后 3x3

import  torch.nn as nn
from torchsummary import summary
import torch.nn.functional as F
from  senet1 import  SE
class BN_Conv2d_Leaky(nn.Module):
    """Conv2d followed by BatchNorm2d, with LeakyReLU applied in forward()."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv2d_Leaky, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                         stride=stride, padding=padding, dilation=dilation,
                         groups=groups, bias=bias)
        norm = nn.BatchNorm2d(out_channels)
        self.seq = nn.Sequential(conv, norm)

    def forward(self, x):
        # Leaky ReLU (default negative slope) on the normalized conv output.
        pre_act = self.seq(x)
        return F.leaky_relu(pre_act)

class BN_Conv2d(nn.Module):
    """Conv2d followed by BatchNorm2d with no activation (linear output)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv2d, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                         stride=stride, padding=padding, dilation=dilation,
                         groups=groups, bias=bias)
        norm = nn.BatchNorm2d(out_channels)
        self.seq = nn.Sequential(conv, norm)

    def forward(self, x):
        # No activation here, unlike BN_Conv2d_Leaky.
        return self.seq(x)

class Dark_block(nn.Module):
    """DarkNet residual block: 1x1 (leaky) -> 3x3 conv -> BN, an optional SE
    channel gate, then an identity shortcut and a final leaky ReLU."""

    def __init__(self, channels, is_se=False, inner_channnels=None):
        super(Dark_block, self).__init__()
        self.is_se = is_se
        # Default bottleneck width: half the block's channel count.
        if inner_channnels is None:
            inner_channnels = channels // 2
        self.conv1 = BN_Conv2d_Leaky(channels, inner_channnels, 1, 1, 0)
        self.conv2 = nn.Conv2d(inner_channnels, channels, 3, 1, 1)
        self.bn = nn.BatchNorm2d(channels)
        if self.is_se:
            self.se = SE(channels, 16)

    def forward(self, x):
        out = self.bn(self.conv2(self.conv1(x)))
        if self.is_se:
            # Re-weight channels with the SE gate before the shortcut.
            out = out * self.se(out)
        out = out + x  # identity shortcut
        return F.leaky_relu(out)


class DarkNet(nn.Module):
    """DarkNet classifier: 3x3 stem, then five stages of (stride-2
    down-sampling conv -> stacked residual Dark_blocks), global average
    pooling and a fully-connected head.

    Args:
        layers: number of Dark_blocks per stage (5 entries, e.g. [1, 2, 8, 8, 4]).
        num_class: number of output classes.
        is_se: add an SE gate inside every block.
    """

    def __init__(self, layers, num_class, is_se=False):
        super(DarkNet, self).__init__()
        self.is_se = is_se
        filters = [64, 128, 256, 512, 1024]

        self.conv1 = BN_Conv2d(3, 32, 3, 1, 1)
        self.redu1 = BN_Conv2d(32, 64, 3, 2, 1)
        # BUG FIX: stages 3-6 called self.__make_layers, which name-mangles to
        # _DarkNet__make_layers and does not exist (the method is _make_layers)
        # -- construction raised AttributeError.  All stages now use
        # self._make_layers.
        self.conv2 = self._make_layers(filters[0], layers[0])
        self.redu2 = BN_Conv2d(filters[0], filters[1], 3, 2, 1)
        self.conv3 = self._make_layers(filters[1], layers[1])
        self.redu3 = BN_Conv2d(filters[1], filters[2], 3, 2, 1)
        self.conv4 = self._make_layers(filters[2], layers[2])
        self.redu4 = BN_Conv2d(filters[2], filters[3], 3, 2, 1)
        self.conv5 = self._make_layers(filters[3], layers[3])
        self.redu5 = BN_Conv2d(filters[3], filters[4], 3, 2, 1)
        self.conv6 = self._make_layers(filters[4], layers[4])
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(filters[4], num_class)

    def _make_layers(self, num_filter, num_layers):
        """Stack num_layers residual Dark_blocks at a fixed channel width."""
        layers = []
        for _ in range(num_layers):
            layers.append(Dark_block(num_filter, self.is_se))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.redu1(out)
        out = self.conv2(out)
        out = self.redu2(out)
        out = self.conv3(out)
        out = self.redu3(out)
        out = self.conv4(out)
        out = self.redu4(out)
        out = self.conv5(out)
        out = self.redu5(out)
        out = self.conv6(out)
        out = self.global_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        # dim=1 made explicit: softmax over classes (implicit dim is deprecated).
        return F.softmax(out, dim=1)

def darknet_53(num_classes=1000):
    """Build DarkNet-53 (residual stages of 1/2/8/8/4 blocks)."""
    return DarkNet([1, 2, 8, 8, 4], num_classes)


# Script entry: build the model and print a per-layer summary for a
# 256x256 RGB input.
net = darknet_53()
summary(net, (3, 256, 256))

4.CSPDenseNet

和DenseNet的DenseBlock差不多,不过把输入的通道砍了一半,然后经过block后再组合一起

import torch
import  torch.nn as nn
from torchsummary import summary
import torch.nn.functional as F
from  senet1 import  SE

class BN_Conv2d(nn.Module):
    """Conv2d followed by BatchNorm2d with no activation (linear output).

    FIX: the parameter names `in_channles` and `kernels_size` were misspelled;
    they are now `in_channels` / `kernel_size`, matching the other BN_Conv2d
    definitions in this file.  All in-file callers pass these positionally, so
    the change is safe for them.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv2d, self).__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                      stride=stride, padding=padding, dilation=dilation,
                      groups=groups, bias=bias),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        return self.seq(x)

class DenseBlock(nn.Module):
    """DenseNet dense block: each inner layer receives the concatenation of the
    block input and all previous layers' outputs.

    Args:
        input_channels: channels of the block input (k0).
        num_layers: number of bottleneck (1x1 -> 3x3) layers.
        growth_rate: channels each layer adds (k).  (Was misspelled
            `grouwth_rate`; all in-file callers pass it positionally.)

    Output channels: input_channels + num_layers * growth_rate.
    """

    def __init__(self, input_channels, num_layers, growth_rate):
        super(DenseBlock, self).__init__()
        self.num_layers = num_layers
        self.k0 = input_channels
        self.k = growth_rate
        self.layers = self._make_layers()

    def _make_layers(self):
        # BUG FIX: layers were kept in a plain Python list, so their parameters
        # were invisible to .parameters(), .to(device), state_dict, etc.
        # nn.ModuleList registers them as proper submodules.
        layer_list = []
        for i in range(self.num_layers):
            layer_list.append(nn.Sequential(
                # 1x1 bottleneck to 4k channels, then 3x3 producing k channels.
                BN_Conv2d(self.k0 + i * self.k, 4 * self.k, 1, 1, 0),
                BN_Conv2d(4 * self.k, self.k, 3, 1, 1)
            ))
        return nn.ModuleList(layer_list)

    def forward(self, x):
        feature = self.layers[0](x)
        out = torch.cat((x, feature), 1)
        for i in range(1, len(self.layers)):
            feature = self.layers[i](out)
            # New features are prepended, preserving the original channel order.
            out = torch.cat((feature, out), 1)
        return out

class CSP_DenseBlock(nn.Module):
    """Cross-Stage-Partial dense block: split the input channels, route only
    the second part through a DenseBlock, then concatenate it back with the
    untouched first part."""

    def __init__(self, in_channels, num_layers, k, part_ratio=0.5):
        super(CSP_DenseBlock, self).__init__()
        # part1 bypasses the dense block; part2 goes through it.
        self.part1_chnls = int(in_channels * part_ratio)
        self.part2_chnls = in_channels - self.part1_chnls
        self.dense = DenseBlock(self.part2_chnls, num_layers, k)

    def forward(self, x):
        shortcut = x[:, :self.part1_chnls, :, :]
        dense_in = x[:, self.part1_chnls:, :, :]
        dense_out = self.dense(dense_in)
        return torch.cat((shortcut, dense_out), 1)


class DenseNet(nn.Module):
    """DenseNet / CSPDenseNet classifier.

    Args:
        layers: dense-layer count per block, e.g. [6, 12, 24, 16].
        k: growth rate.
        theta: transition compression factor in (0, 1].
        num_classes: number of output classes.
        part_ratio: 0 selects plain DenseBlock stages; any non-zero value
            selects CSP_DenseBlock (which uses its own default split ratio).
    """

    def __init__(self, layers, k, theta, num_classes, part_ratio=0):
        super(DenseNet, self).__init__()
        self.layers = layers
        self.k = k
        self.theta = theta
        # Block type chosen once; both have the (in_channels, num_layers, k)
        # constructor signature and the same output channel count.
        self.Block = DenseBlock if part_ratio == 0 else CSP_DenseBlock

        self.conv = BN_Conv2d(3, 2 * k, 7, 2, 3)
        self.blocks, patches = self._make_blocks(2 * k)
        self.fc = nn.Linear(patches, num_classes)

    def _make_blocks(self, k0):
        """Build alternating block / transition stages.

        :param k0: channels entering the first block
        :return: (nn.Sequential of stages, channels of the final feature map)
        """
        layers_list = []
        patches = 0
        for i in range(len(self.layers)):
            layers_list.append(self.Block(k0, self.layers[i], self.k))
            patches = k0 + self.layers[i] * self.k  # output channels of this block
            if i != len(self.layers) - 1:
                transition, k0 = self._make_transition(patches)
                layers_list.append(transition)
        return nn.Sequential(*layers_list), patches

    def _make_transition(self, in_chls):
        """1x1 compression (by theta) followed by 2x2 average pooling."""
        out_chls = int(self.theta * in_chls)
        return nn.Sequential(
            BN_Conv2d(in_chls, out_chls, 1, 1, 0),
            nn.AvgPool2d(2)
        ), out_chls

    def forward(self, x):
        out = self.conv(x)
        out = F.max_pool2d(out, 3, 2, 1)
        out = self.blocks(out)
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        # BUG FIX: dim=1 made explicit -- softmax over classes (the
        # implicit-dim form is deprecated and warns).
        out = F.softmax(self.fc(out), dim=1)
        return out

def _csp_densenet(block_layers, num_classes):
    # Shared builder: all CSP variants use k=32, theta=0.5 and a 50/50
    # cross-stage-partial split; only the per-stage layer counts differ.
    return DenseNet(block_layers, k=32, theta=0.5,
                    num_classes=num_classes, part_ratio=0.5)


def csp_densenet_121(num_classes=1000):
    """CSP-DenseNet-121 (stages of 6/12/24/16 dense layers)."""
    return _csp_densenet([6, 12, 24, 16], num_classes)


def csp_densenet_169(num_classes=1000):
    """CSP-DenseNet-169 (stages of 6/12/32/32 dense layers)."""
    return _csp_densenet([6, 12, 32, 32], num_classes)


def csp_densenet_201(num_classes=1000):
    """CSP-DenseNet-201 (stages of 6/12/48/32 dense layers)."""
    return _csp_densenet([6, 12, 48, 32], num_classes)


def csp_densenet_264(num_classes=1000):
    """CSP-DenseNet-264 (stages of 6/12/64/48 dense layers)."""
    return _csp_densenet([6, 12, 64, 48], num_classes)

5. CSPDarkNet

import torch
import  torch.nn as nn
from torchsummary import summary
import torch.nn.functional as F


class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        gate = torch.tanh(F.softplus(x))
        return x * gate

class BN_Conv_Mish(nn.Module):
    """Conv2d -> BatchNorm2d -> Mish activation.

    FIX: the original constructed a fresh Mish() module on every forward call
    (`Mish()(out)`); the activation is now computed functionally -- identical
    math, no per-call module allocation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv_Mish, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                              dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = self.bn(self.conv(x))
        # Mish: x * tanh(softplus(x))
        return out * torch.tanh(F.softplus(out))


class ResidualBlock(nn.Module):
    """Residual block: 1x1 BN-conv-Mish -> 3x3 conv -> BN, identity shortcut,
    then Mish on the sum.

    FIX: the original constructed a fresh Mish() module on every forward call;
    the activation is now computed functionally (identical math).
    """

    def __init__(self, chnls, inner_chnnls=None):
        super(ResidualBlock, self).__init__()
        # Default: no bottleneck (inner width equals the block width).
        if inner_chnnls is None:
            inner_chnnls = chnls
        self.conv1 = BN_Conv_Mish(chnls, inner_chnnls, 1, 1, 0)
        self.conv2 = nn.Conv2d(inner_chnnls, chnls, 3, 1, 1, bias=False)
        self.bn = nn.BatchNorm2d(chnls)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.bn(out) + x  # identity shortcut
        # Mish: x * tanh(softplus(x))
        return out * torch.tanh(F.softplus(out))


class CSPFirst(nn.Module):
    """First CSP stage: stride-2 down-sample, two parallel 1x1 transitions
    (one routed through a residual block), concatenation, then a 1x1 fuse.

    Unlike CSPStem, both branches keep the full channel width here, so the
    fuse conv reduces 2*out_chels back to out_chels."""

    def __init__(self, in_chnnls, out_chels):
        super(CSPFirst, self).__init__()
        self.dsample = BN_Conv_Mish(in_chnnls, out_chels, 3, 2, 1)
        self.trans_0 = BN_Conv_Mish(out_chels, out_chels, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chels, out_chels, 1, 1, 0)
        self.block = ResidualBlock(out_chels, out_chels // 2)
        self.trans_cat = BN_Conv_Mish(2 * out_chels, out_chels, 1, 1, 0)

    def forward(self, x):
        x = self.dsample(x)
        shortcut = self.trans_0(x)
        main = self.block(self.trans_1(x))
        fused = torch.cat((shortcut, main), 1)
        return self.trans_cat(fused)

class CSPStem(nn.Module):
    """CSP stage: stride-2 down-sample, split into two half-width 1x1
    branches (one through a stack of residual blocks), concatenate, and fuse
    with a final 1x1 conv."""

    def __init__(self, in_chnls, out_chnls, num_block):
        super(CSPStem, self).__init__()
        half = out_chnls // 2
        self.dsample = BN_Conv_Mish(in_chnls, out_chnls, 3, 2, 1)
        self.trans_0 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.blocks = nn.Sequential(*[ResidualBlock(half) for _ in range(num_block)])
        self.trans_cat = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)

    def forward(self, x):
        x = self.dsample(x)
        shortcut = self.trans_0(x)
        main = self.blocks(self.trans_1(x))
        fused = torch.cat((shortcut, main), 1)
        return self.trans_cat(fused)

class CSP_DarkNet(nn.Module):
    """CSPDarkNet classifier: 3x3 stem, a CSPFirst stage, four CSPStem stages,
    global average pooling and a fully-connected head.

    Args:
        num_blocks: residual-block counts for the four CSPStem stages
            (e.g. [2, 8, 8, 4] for CSPDarkNet-53).
        num_classes: number of output classes.
    """

    def __init__(self, num_blocks, num_classes=1000):
        super(CSP_DarkNet, self).__init__()
        chnls = [64, 128, 256, 512, 1024]
        self.conv0 = BN_Conv_Mish(3, 32, 3, 1, 1)
        self.neck = CSPFirst(32, chnls[0])
        self.body = nn.Sequential(
            *[CSPStem(chnls[i], chnls[i + 1], num_blocks[i]) for i in range(4)]
        )
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(chnls[4], num_classes)

    def forward(self, x):
        out = self.conv0(x)
        out = self.neck(out)
        out = self.body(out)
        out = self.global_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        # BUG FIX: dim=1 made explicit -- softmax over classes (the
        # implicit-dim form is deprecated and warns).
        return F.softmax(out, dim=1)


def csp_darknet_53(num_classes=1000):
    """Build CSPDarkNet-53 (CSP stages of 2/8/8/4 residual blocks)."""
    return CSP_DarkNet([2, 8, 8, 4], num_classes)


# Script entry: build the model and print a per-layer summary for a
# 256x256 RGB input.
net = csp_darknet_53()
summary(net, (3, 256, 256))
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值