DenseNet Model (PyTorch version)


```python
import torch
import torch.nn as nn
from collections import OrderedDict


class _DenseLayer(nn.Sequential):
    def __init__(self, in_channels, growth_rate, bn_size):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(in_channels))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(in_channels, bn_size * growth_rate,
                                           kernel_size=1,
                                           stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bn_size*growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size*growth_rate, growth_rate,
                                           kernel_size=3,
                                           stride=1, padding=1, bias=False))

    # Override forward: run the BN-ReLU-Conv bottleneck, then concatenate
    # the input with the new feature maps along the channel dimension,
    # so each layer adds growth_rate channels to the running feature map
    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        return torch.cat([x, new_features], 1)
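
# Shape sketch (added): with in_channels=64, growth_rate=32, bn_size=4,
# conv1 maps 64 -> 128 channels (bn_size * growth_rate), conv2 maps
# 128 -> 32, and the concatenation returns 64 + 32 = 96 channels.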


class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, in_channels, bn_size, growth_rate):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            self.add_module('denselayer%d' % (i+1),
                            _DenseLayer(in_channels+growth_rate*i,
                                        growth_rate, bn_size))


class _Transition(nn.Sequential):
    def __init__(self, in_channels, out_channels):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(in_channels))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_channels, out_channels,
                                          kernel_size=1,
                                          stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
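
# Channel bookkeeping (added sketch; numbers use the densenet_BC_100 config
# defined below: growth_rate=12, block_config=(16, 16, 16), theta=0.5):
#   stem:    2 * 12 = 24 channels
#   block 1: 24 + 16 * 12 = 216 -> transition -> int(216 * 0.5) = 108
#   block 2: 108 + 16 * 12 = 300 -> transition -> int(300 * 0.5) = 150
#   block 3: 150 + 16 * 12 = 342 (no transition after the last block)
# The final BatchNorm and the classifier therefore see 342 features.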


class DenseNet_BC(nn.Module):
    def __init__(self, growth_rate=12, block_config=(6,12,24,16),
                 bn_size=4, theta=0.5, num_classes=10):
        super(DenseNet_BC, self).__init__()

        # The stem convolution outputs 2 * growth_rate filters
        num_init_feature = 2 * growth_rate

        # num_classes == 10 selects the CIFAR-10 stem (3x3 conv, stride 1)
        if num_classes == 10:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_feature,
                                    kernel_size=3, stride=1,
                                    padding=1, bias=False)),
            ]))
        else:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_feature,
                                    kernel_size=7, stride=2,
                                    padding=3, bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_feature)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            ]))
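        # Note (added): the CIFAR branch keeps the input resolution (3x3,
        # stride 1), while the ImageNet-style stem (7x7 stride-2 conv +
        # 3x3 max pool) downsamples the input by a factor of 4.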

        num_feature = num_init_feature
        # Stack the dense blocks: each adds num_layers * growth_rate channels,
        # and every transition except after the last block compresses the
        # channel count by theta
        for i, num_layers in enumerate(block_config):
            self.features.add_module('denseblock%d' % (i+1),
                                     _DenseBlock(num_layers, num_feature,
                                                 bn_size, growth_rate))
            num_feature = num_feature + growth_rate * num_layers
            if i != len(block_config)-1:
                self.features.add_module('transition%d' % (i + 1),
                                         _Transition(num_feature,
                                                     int(num_feature * theta)))
                num_feature = int(num_feature * theta)

        self.features.add_module('norm5', nn.BatchNorm2d(num_feature))
        self.features.add_module('relu5', nn.ReLU(inplace=True))
        self.features.add_module('avg_pool', nn.AdaptiveAvgPool2d((1, 1)))

        self.classifier = nn.Linear(num_feature, num_classes)

        # Standard DenseNet initialization: Kaiming-normal conv weights,
        # unit-scale/zero-bias BatchNorm, and zero classifier bias
        # (the Linear weight keeps PyTorch's default initialization)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        features = self.features(x)
        # AdaptiveAvgPool2d((1, 1)) has already collapsed the spatial
        # dimensions, so flattening yields (N, num_feature) for any input size
        out = features.view(features.size(0), -1)
        out = self.classifier(out)
        return out


# DenseNet-BC configurations for ImageNet
def DenseNet121(num_classes):
    return DenseNet_BC(growth_rate=32, block_config=(6, 12, 24, 16),
                       num_classes=num_classes)


def DenseNet169(num_classes):
    return DenseNet_BC(growth_rate=32, block_config=(6, 12, 32, 32),
                       num_classes=num_classes)


def DenseNet201(num_classes):
    return DenseNet_BC(growth_rate=32, block_config=(6, 12, 48, 32),
                       num_classes=num_classes)


def DenseNet161(num_classes):
    return DenseNet_BC(growth_rate=48, block_config=(6, 12, 36, 24),
                       num_classes=num_classes)

# DenseNet-BC for CIFAR
def densenet_BC_100():
    return DenseNet_BC(growth_rate=12, block_config=(16, 16, 16))


# Generate a random input
rgb = torch.randn(1, 3, 224, 224)
# Build the network
net = DenseNet121(num_classes=10)
# Forward pass
out = net(rgb)
print('-----'*5)
# Print the output shape
print(out.shape)
print('-----'*5)
```
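
As a quick sanity check (added here, not part of the original post), the trainable parameters of each constructor can be counted; the totals depend on `num_classes`, since `num_classes == 10` selects the smaller CIFAR stem:

```python
def count_params(model):
    # Sum the element counts of all trainable parameter tensors
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

for ctor in (DenseNet121, DenseNet169, DenseNet201, DenseNet161):
    print(ctor.__name__, count_params(ctor(num_classes=10)))
```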

[Output screenshot: torch.Size([1, 10])]

```python
# Generate a random input
rgb = torch.randn(1, 3, 224, 224)
# Build the network
net = DenseNet169(num_classes=10)
# Forward pass
out = net(rgb)
print('-----'*5)
# Print the output shape
print(out.shape)
print('-----'*5)
```

[Output screenshot: torch.Size([1, 10])]
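
The CIFAR constructor can be exercised the same way. A minimal sketch (added; the 32x32 input matches CIFAR-10, and the expected shape follows from the classifier head):

```python
# DenseNet-BC-100: growth_rate=12, three dense blocks of 16 layers each
net = densenet_BC_100()
rgb = torch.randn(1, 3, 32, 32)
out = net(rgb)
print(out.shape)  # torch.Size([1, 10])
```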

