Implementing ResNet18 in PyTorch

PyTorch's official GitHub already provides implementations of the standard models (link).

But some of those models, the ResNets especially, generate all of their layers through helper functions, which I honestly find painful to read! (See the sketch just below for the pattern I mean.)
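For contrast, the official code builds each ResNet stage with a `_make_layer` helper; what follows is my simplified paraphrase of that pattern, not a verbatim copy of torchvision's resnet.py:

def _make_layer(self, block, planes, num_blocks, stride=1):
    # projection shortcut when the spatial size or channel count changes
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion))
    layers = [block(self.inplanes, planes, stride, downsample)]
    self.inplanes = planes * block.expansion
    for _ in range(1, num_blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)

# ResNet18 is then four such stages of two BasicBlocks each:
# self.layer1 = self._make_layer(BasicBlock, 64, 2)
# self.layer2 = self._make_layer(BasicBlock, 128, 2, stride=2)
# self.layer3 = self._make_layer(BasicBlock, 256, 2, stride=2)
# self.layer4 = self._make_layer(BasicBlock, 512, 2, stride=2)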

So, in the Caffe style of spelling every layer out explicitly, I wrote a PyTorch ResNet18 by hand. It differs from the 1000-class ImageNet model in a few ways: the input is a 48*48 3-channel image, the output has 7 classes, and because the input is small, the 7*7 stride-2 stem conv and the max pool are replaced by a single 3*3 stride-1 conv, so stage 1 keeps the full 48*48 resolution.

import torch.nn as nn
import torch.nn.functional as F

class ResNet18Model(nn.Module):

    def __init__(self):
        super().__init__()

        # batch-norm layers, one per convolution
        self.bn64_0 = nn.BatchNorm2d(64)
        self.bn64_1 = nn.BatchNorm2d(64)
        self.bn64_2 = nn.BatchNorm2d(64)
        self.bn64_3 = nn.BatchNorm2d(64)
        self.bn64_4 = nn.BatchNorm2d(64)
        self.bn128_0 = nn.BatchNorm2d(128)
        self.bn128_1 = nn.BatchNorm2d(128)
        self.bn128_2 = nn.BatchNorm2d(128)
        self.bn128_3 = nn.BatchNorm2d(128)
        self.bn256_0 = nn.BatchNorm2d(256)
        self.bn256_1 = nn.BatchNorm2d(256)
        self.bn256_2 = nn.BatchNorm2d(256)
        self.bn256_3 = nn.BatchNorm2d(256)
        self.bn512_0 = nn.BatchNorm2d(512)
        self.bn512_1 = nn.BatchNorm2d(512)
        self.bn512_2 = nn.BatchNorm2d(512)
        self.bn512_3 = nn.BatchNorm2d(512)

        # identity shortcuts for blocks whose input/output shapes match
        self.shortcut_straight_0 = nn.Sequential()
        self.shortcut_straight_1 = nn.Sequential()
        self.shortcut_straight_2 = nn.Sequential()
        self.shortcut_straight_3 = nn.Sequential()
        self.shortcut_straight_4 = nn.Sequential()

        # projection shortcuts (1x1 conv, stride 2) for the downsampling blocks
        self.shortcut_conv_bn_64_128_0 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm2d(128))
        self.shortcut_conv_bn_128_256_0 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm2d(256))
        self.shortcut_conv_bn_256_512_0 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm2d(512))

        # 3x3 convolutions; names encode kernel size, in/out channels, stride, padding
        self.conv_w3_h3_in3_out64_s1_p1_0 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in64_out64_s1_p1_0 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in64_out64_s1_p1_1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in64_out64_s1_p1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in64_out64_s1_p1_3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in64_out128_s2_p1_0 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv_w3_h3_in128_out128_s1_p1_0 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in128_out128_s1_p1_1 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in128_out128_s1_p1_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in128_out256_s2_p1_0 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv_w3_h3_in256_out256_s1_p1_0 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in256_out256_s1_p1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in256_out256_s1_p1_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in256_out512_s2_p1_0 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv_w3_h3_in512_out512_s1_p1_0 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in512_out512_s1_p1_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_w3_h3_in512_out512_s1_p1_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)

        # classification head
        self.avg_pool_0 = nn.AdaptiveAvgPool2d((1, 1))
        self.fc_512_7_0 = nn.Linear(512, 7)
        self.dropout_0 = nn.Dropout(p=0.5)

    def forward(self, x):
        # stem: input 48*48*3
        t = self.conv_w3_h3_in3_out64_s1_p1_0(x)    # 48*48*64
        t = self.bn64_0(t)
        y1 = F.relu(t)

        # stage 1, block 1 (identity shortcut)
        t = self.conv_w3_h3_in64_out64_s1_p1_0(y1)  # 48*48*64
        t = self.bn64_1(t)
        y2 = F.relu(t)
        t = self.conv_w3_h3_in64_out64_s1_p1_1(y2)  # 48*48*64
        t = self.bn64_2(t)
        t += self.shortcut_straight_0(y1)
        y3 = F.relu(t)

        # stage 1, block 2 (identity shortcut)
        t = self.conv_w3_h3_in64_out64_s1_p1_2(y3)  # 48*48*64
        t = self.bn64_3(t)
        y4 = F.relu(t)
        t = self.conv_w3_h3_in64_out64_s1_p1_3(y4)  # 48*48*64
        t = self.bn64_4(t)
        t += self.shortcut_straight_1(y3)
        y5 = F.relu(t)

        # stage 2, block 1 (downsampling, projection shortcut)
        t = self.conv_w3_h3_in64_out128_s2_p1_0(y5)   # 24*24*128
        t = self.bn128_0(t)
        y6 = F.relu(t)
        t = self.conv_w3_h3_in128_out128_s1_p1_0(y6)  # 24*24*128
        t = self.bn128_1(t)
        t += self.shortcut_conv_bn_64_128_0(y5)
        y7 = F.relu(t)

        # stage 2, block 2 (identity shortcut)
        t = self.conv_w3_h3_in128_out128_s1_p1_1(y7)  # 24*24*128
        t = self.bn128_2(t)
        y8 = F.relu(t)
        t = self.conv_w3_h3_in128_out128_s1_p1_2(y8)  # 24*24*128
        t = self.bn128_3(t)
        t += self.shortcut_straight_2(y7)
        y9 = F.relu(t)

        # stage 3, block 1 (downsampling, projection shortcut)
        t = self.conv_w3_h3_in128_out256_s2_p1_0(y9)  # 12*12*256
        t = self.bn256_0(t)
        y10 = F.relu(t)
        t = self.conv_w3_h3_in256_out256_s1_p1_0(y10) # 12*12*256
        t = self.bn256_1(t)
        t += self.shortcut_conv_bn_128_256_0(y9)
        y11 = F.relu(t)

        # stage 3, block 2 (identity shortcut)
        t = self.conv_w3_h3_in256_out256_s1_p1_1(y11) # 12*12*256
        t = self.bn256_2(t)
        y12 = F.relu(t)
        t = self.conv_w3_h3_in256_out256_s1_p1_2(y12) # 12*12*256
        t = self.bn256_3(t)
        t += self.shortcut_straight_3(y11)
        y13 = F.relu(t)

        # stage 4, block 1 (downsampling, projection shortcut)
        t = self.conv_w3_h3_in256_out512_s2_p1_0(y13) # 6*6*512
        t = self.bn512_0(t)
        y14 = F.relu(t)
        t = self.conv_w3_h3_in512_out512_s1_p1_0(y14) # 6*6*512
        t = self.bn512_1(t)
        t += self.shortcut_conv_bn_256_512_0(y13)
        y15 = F.relu(t)

        # stage 4, block 2 (identity shortcut)
        t = self.conv_w3_h3_in512_out512_s1_p1_1(y15) # 6*6*512
        t = self.bn512_2(t)
        y16 = F.relu(t)
        t = self.conv_w3_h3_in512_out512_s1_p1_2(y16) # 6*6*512
        t = self.bn512_3(t)
        t += self.shortcut_straight_4(y15)
        y17 = F.relu(t)

        # head: global average pool -> flatten -> dropout -> 7-way fc
        out = self.avg_pool_0(y17)  # 1*1*512
        out = out.view(out.size(0), -1)
        out = self.dropout_0(out)
        out = self.fc_512_7_0(out)
        return out

if __name__ == '__main__':
    import torch

    net = ResNet18Model()
    # print(net)
    net_in = torch.rand(1, 3, 48, 48)
    net_out = net(net_in)
    print(net_out)
    print(net_out.size())
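Two quick sanity checks worth adding (my own sketch, not part of the original post): forward() returns raw logits with no softmax, so the output pairs directly with nn.CrossEntropyLoss, and the parameter count should land near a standard ResNet18 trunk (roughly 11M here; treat the exact figure as an estimate).

import torch
import torch.nn as nn

net = ResNet18Model()
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'trainable parameters: {n_params:,}')  # expect roughly 11M (estimate)

# raw logits feed straight into CrossEntropyLoss, which applies log-softmax internally
criterion = nn.CrossEntropyLoss()
logits = net(torch.rand(4, 3, 48, 48))
labels = torch.randint(0, 7, (4,))
loss = criterion(logits, labels)
loss.backward()
print(loss.item())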
