vovnet test

The author shares personal experience tuning VoVNet parameters: compared with the original model, the adjusted configuration (with a maximum batch size of 4) shifts the runtime from the original 130 ms.
vovnet39, GTX 1070, 640*640 input, batch 1: 15 ms
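
The post does not show how the 15 ms number was measured. Below is a minimal sketch of one way to time the forward pass on the GPU, assuming a `vovnet39()` factory such as the one listed in `__all__` in the code further down (warm-up iterations plus `torch.cuda.synchronize()` around the timed loop); the factory name and its no-argument call are assumptions, not shown in the original.

```python
import time
import torch

# Minimal latency-measurement sketch; vovnet39() stands in for whatever model
# factory is being benchmarked (the name is taken from __all__ below).
model = vovnet39().cuda().eval()
x = torch.randn(1, 3, 640, 640, device='cuda')   # batch 1, 640*640 input

with torch.no_grad():
    for _ in range(10):                           # warm-up
        model(x)
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(100):
        model(x)
    torch.cuda.synchronize()                      # wait for all GPU kernels to finish
    print('avg forward time: {:.1f} ms'.format((time.time() - start) / 100 * 1000))
```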

I modified the parameters myself:

import time

import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

__all__ = ['VoVNet', 'vovnet27_slim', 'vovnet39', 'vovnet57']



def conv3x3(in_channels, out_channels, module_name, postfix,
            stride=1, groups=1, kernel_size=3, padding=1):
    """3x3 convolution with padding"""
    return [
        ('{}_{}/conv'.format(module_name, postfix),
         nn.Conv2d(in_channels, out_channels,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding,
                   groups=groups,
                   bias=False)),
        ('{}_{}/norm'.format(module_name, postfix),
         nn.BatchNorm2d(out_channels)),
        ('{}_{}/relu'.format(module_name, postfix),
         nn.ReLU(inplace=True)),
    ]
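
The `(name, module)` pairs returned by `conv3x3` are meant to be wrapped in an `OrderedDict` and passed to `nn.Sequential` (which is why `OrderedDict` is imported at the top of the file). A minimal usage sketch, reusing those imports; the module name `'stem'` and postfix `'1'` are just illustrative placeholders:

```python
# Illustrative usage of conv3x3: the (name, module) pairs become a named Sequential.
# 'stem' and '1' are placeholder module_name/postfix values.
layers = conv3x3(in_channels=3, out_channels=64, module_name='stem', postfix='1', stride=2)
block = nn.Sequential(OrderedDict(layers))
y = block(torch.randn(1, 3, 640, 640))   # -> torch.Size([1, 64, 320, 320])
```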
VoVNet is a deep convolutional neural network for image classification, characterized by efficient computation and relatively few parameters. Below is a simplified PyTorch implementation of a VoVNet-style backbone:

```python
import torch.nn as nn


class Conv2dBNReLU(nn.Sequential):
    """Convolution (3x3 by default) followed by BatchNorm and ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        padding = (kernel_size - 1) // 2
        super(Conv2dBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding,
                      groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True)
        )


class Stem(nn.Module):
    """Input stem: three 3x3 convolutions plus a max-pool; output has 128 channels."""

    def __init__(self):
        super(Stem, self).__init__()
        self.conv1 = Conv2dBNReLU(3, 64, kernel_size=3, stride=2)
        self.conv2 = Conv2dBNReLU(64, 64, kernel_size=3, stride=1)
        self.conv3 = Conv2dBNReLU(64, 128, kernel_size=3, stride=1)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.maxpool(x)
        return x


class VoVNetUnit(nn.Module):
    """A stack of 3x3 Conv-BN-ReLU blocks with an identity shortcut when shapes allow."""

    def __init__(self, in_planes, out_planes, num_blocks, stride):
        super(VoVNetUnit, self).__init__()
        self.conv = nn.ModuleList()
        for i in range(num_blocks):
            if i == 0:
                # the first block changes the channel count and (optionally) downsamples
                self.conv.append(Conv2dBNReLU(in_planes, out_planes, kernel_size=3, stride=stride))
            else:
                self.conv.append(Conv2dBNReLU(out_planes, out_planes, kernel_size=3, stride=1))
        # an identity shortcut is only shape-compatible when block 0 changes nothing
        self.use_identity = (stride == 1 and in_planes == out_planes)

    def forward(self, x):
        identity = x if self.use_identity else None
        for conv in self.conv:
            x = conv(x)
        if identity is not None:
            x = x + identity
        return x


class VoVNet(nn.Module):
    """Backbone built from a Stem and four stages described by `cfg`.

    `cfg` is a list of four stages; each stage is a list of
    (out_planes, num_blocks, stride) tuples, one per VoVNetUnit.
    """

    def __init__(self, cfg):
        super(VoVNet, self).__init__()
        self.cfg = cfg
        self.stem = Stem()  # outputs 128 channels
        self.stage1 = self._make_stage(128, cfg[0])
        self.stage2 = self._make_stage(cfg[0][-1][0], cfg[1])
        self.stage3 = self._make_stage(cfg[1][-1][0], cfg[2])
        self.stage4 = self._make_stage(cfg[2][-1][0], cfg[3])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(cfg[-1][-1][0], 1000)

    def _make_stage(self, in_planes, planes):
        modules = []
        for out_planes, num_blocks, stride in planes:
            modules.append(VoVNetUnit(in_planes, out_planes, num_blocks, stride))
            in_planes = out_planes
        return nn.Sequential(*modules)

    def forward(self, x):
        x = self.stem(x)
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
```
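
A hedged usage sketch for the class above, following the `(out_planes, num_blocks, stride)` cfg format it expects; the concrete channel and block counts below are made up for illustration and are not the configuration the author benchmarked.

```python
import torch

# Illustrative only: an arbitrary cfg, not the author's tuned configuration.
cfg = [
    [(128, 1, 1), (128, 1, 1)],   # stage1: (out_planes, num_blocks, stride) per unit
    [(256, 1, 2), (256, 1, 1)],   # stage2
    [(384, 2, 2)],                # stage3
    [(512, 2, 2)],                # stage4
]
model = VoVNet(cfg).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 640, 640))
print(logits.shape)               # torch.Size([1, 1000])
```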