# yolov3_pelee_spp_panet combined network
#
# Combines Pelee, YOLOv3, SPP and PANet to strengthen the network's fitting
# capacity. On the official COCO dataset and on our own vehicle dataset it
# outperforms the standard YOLOv3, while remaining a lightweight model:
# only 28 MB, few parameters, fast inference.

[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=8
subdivisions=1
width=512
height=512
flip=1
channels=3
momentum=0.9
decay=0.0005
#angle=10, -10
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 100000
policy=steps
steps=80000,90000
scales=.1,.1

# mosaic=1
bgr=1

[convolutional]
filters=32
size=3
stride=2
pad=1
batch_normalize=1
activation=leaky

[maxpool]
# 1
size=2
stride=2

[route]
# 2
layers=-2

[convolutional]
# 3
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 4
filters=32
size=3
stride=2
pad=1
batch_normalize=1
activation=leaky

[route]
# 5
layers=-4,-1

[convolutional]
# 6
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 7
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 8
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 9
layers=-3

[convolutional]
# 10
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 11
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 12
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 13
layers=-7,-5,-1

[convolutional]
# 14
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 15
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 16
layers=-3

[convolutional]
# 17
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 18
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 19
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 20
layers=-7,-5,-1

[convolutional]
# 21
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 22
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 23
layers=-3

[convolutional]
# 24
filters=16
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 25
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 26
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 27
layers=-7,-5,-1

[convolutional]
# 28
filters=128
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[maxpool]
size=2
stride=2
# 29

[convolutional]
# 30
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 31
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 32
layers=-3

[convolutional]
# 33
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 34
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 35
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 36
layers=-7,-5,-1

[convolutional]
# 37
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 38
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 39
layers=-3

[convolutional]
# 40
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 41
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 42
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 43
layers=-7,-5,-1

[convolutional]
# 44
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 45
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 46
layers=-3

[convolutional]
# 47
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 48
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 49
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 50
layers=-7,-5,-1

[convolutional]
# 51
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 52
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 53
layers=-3

[convolutional]
# 54
filters=32
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 55
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 56
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 57
layers=-7,-5,-1

[convolutional]
# 58
filters=256
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[maxpool]
size=2
stride=2
# 59

[convolutional]
# 60
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 61
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 62
layers=-3

[convolutional]
# 63
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 64
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 65
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 66
layers=-7,-5,-1

[convolutional]
# 67
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 68
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 69
layers=-3

[convolutional]
# 70
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 71
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 72
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 73
layers=-7,-5,-1

[convolutional]
# 74
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 75
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 76
layers=-3

[convolutional]
# 77
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 78
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 79
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 80
layers=-7,-5,-1

[convolutional]
# 81
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 82
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 83
layers=-3

[convolutional]
# 84
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 85
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#86
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#87
layers=-7,-5,-1

[convolutional]
#88
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#89
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#90
layers=-3

[convolutional]
#91
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#92
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#93
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#94
layers=-7,-5,-1

[convolutional]
#95
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#96
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#97
layers=-3

[convolutional]
#98
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#99
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#100
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#101
layers=-7,-5,-1

[convolutional]
#102
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#103
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#104
layers=-3

[convolutional]
#105
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#106
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#107
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#108
layers=-7,-5,-1

[convolutional]
#109
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#110
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#111
layers=-3

[convolutional]
#112
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#113
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
#114
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
#115
layers=-7,-5,-1

[convolutional]
#116
filters=512
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[maxpool]
size=2
stride=2
#117

[convolutional]
# 118
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 119
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 120
layers=-3

[convolutional]
# 121
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 122
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 123
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 124
layers=-7,-5,-1

[convolutional]
# 125
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 126
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 127
layers=-3

[convolutional]
# 128
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 129
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 130
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 131
layers=-7,-5,-1

[convolutional]
# 132
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 133
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 134
layers=-3

[convolutional]
# 135
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 136
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 137
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 138
layers=-7,-5,-1

[convolutional]
# 139
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 140
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 141
layers=-3

[convolutional]
# 142
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 143
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 144
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 145
layers=-7,-5,-1

[convolutional]
# 146
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 147
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 148
layers=-3

[convolutional]
# 149
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 150
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 151
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 152
layers=-7,-5,-1

[convolutional]
# 153
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 154
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 155
layers=-3

[convolutional]
# 156
filters=64
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 157
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 158
filters=16
size=3
stride=1
pad=1
batch_normalize=1
activation=leaky

[route]
# 159
layers=-7,-5,-1

[convolutional]
# 160
filters=704
size=1
stride=1
pad=1
batch_normalize=1
activation=leaky

[convolutional]
# 161
filters=256
size=1
pad=1
stride=1
batch_normalize=1
activation=leaky

### SPP ###
[maxpool]
# 162
stride=1
size=5

[route]
# 163
layers=-2

[maxpool]
# 164
stride=1
size=9

[route]
# 165
layers=-4

[maxpool]
# 166
stride=1
size=13

[route]
# 167
layers=-1,-3,-5,-6

[convolutional]
# 168
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[upsample]
# 169
stride=2

[route]
# 170
layers = -1,116 

[convolutional]
# 171
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 172
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
# 173
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 174
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
# 175
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 176
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
# 177
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
# 178
stride=2

[route]
# 179
layers = -1,58

[convolutional]
# 180
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 181
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
# 182
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 183
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
# 184
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
# 185
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
# 186
size=1
stride=1
pad=1
filters=56
activation=linear

[yolo]
# 187
mask = 0,1,2,3
anchors =   5, 12,  13, 16,   9, 35,  26, 29,  17, 66,  47, 48,  28,114,  62, 88, 104,123,  64,205, 142,232, 229,335
classes=9
num=12
stride=8
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

[route]
# 188
layers=-3

[convolutional]
# 189
filters=128
size=3
pad=1
stride=2
batch_normalize=1
activation=leaky

[route]
layers=-1, 177

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=56
activation=linear


[yolo]
mask = 4,5,6,7
anchors =   5, 12,  13, 16,   9, 35,  26, 29,  17, 66,  47, 48,  28,114,  62, 88, 104,123,  64,205, 142,232, 229,335
classes=9
num=12
stride=16
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

[route]
layers = -3

[convolutional]
filters=512
size=3
pad=1
stride=2
batch_normalize=1
activation=leaky

[route]
layers=-1,168

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=56
activation=linear

[yolo]
mask = 8,9,10,11
anchors =   5, 12,  13, 16,   9, 35,  26, 29,  17, 66,  47, 48,  28,114,  62, 88, 104,123,  64,205, 142,232, 229,335
classes=9
num=12
stride=32
jitter=.3
ignore_thresh=.7
truth_thresh=1
random=1
scale_x_y = 1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

以下是PeleeNet神经网络的PyTorch代码: ```python import torch.nn as nn class PeleeNet(nn.Module): def __init__(self, num_classes=100): super(PeleeNet, self).__init__() self.num_classes = num_classes self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Sequential( nn.Conv2d(32, 16, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(16), nn.ReLU(inplace=True), nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True) ) self.conv3_1 = nn.Sequential( nn.Conv2d(32, 32, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True), nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True) ) self.conv3_2 = nn.Sequential( nn.Conv2d(64, 32, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True), nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True) ) self.conv4_1 = nn.Sequential( nn.Conv2d(64, 64, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True), nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True) ) self.conv4_2 = nn.Sequential( nn.Conv2d(128, 64, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True), nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True) ) self.conv5_1 = nn.Sequential( nn.Conv2d(128, 128, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d(256), nn.ReLU(inplace=True) ) self.conv5_2 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(256), 
nn.ReLU(inplace=True) ) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(256, num_classes) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.conv3_1(x) x = self.conv3_2(x) x = self.conv4_1(x) x = self.conv4_2(x) x = self.conv5_1(x) x = self.conv5_2(x) x = self.avgpool(x) x = x.view(x.size(), -1) x = self.fc(x) return x ``` 希望对你有所帮助!
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值