使用torch实现神经网络的一些结构
有时间会长期更新,主要是练一练自己的代码
前言
在学习将深度学习用于目标检测领域的过程中,很多时候需要自己去修改代码,而不是仅仅 clone 别人的 repository。这么长时间以来,自己几乎都是对着书本或者网上的一些资源来敲代码,因此迫切地想独立写一些经典的结构,于是有了这篇文章。
一、空间池化(自定义)
class spatial_pool(nn.Module):
    """Parallel spatial pooling: two max-pool branches and one average-pool
    branch over the same input, concatenated along the channel dimension.

    Args:
        kernel_size: sequence of three ints — kernels for maxpool_1,
            maxpool_2 and avgpool_1 respectively.
        stride: sequence of three ints, one stride per branch.

    With odd kernels and stride 1 the ``k // 2`` padding keeps the spatial
    size unchanged, so the output simply has 3x the input channels.
    """

    def __init__(self, kernel_size, stride):
        super(spatial_pool, self).__init__()
        self.maxpool_1 = nn.MaxPool2d(kernel_size[0], stride[0], padding=kernel_size[0] // 2)
        self.maxpool_2 = nn.MaxPool2d(kernel_size[1], stride[1], padding=kernel_size[1] // 2)
        self.avgpool_1 = nn.AvgPool2d(kernel_size[2], stride[2], padding=kernel_size[2] // 2)

    def forward(self, x):
        # Run the three pooling branches and stack their channels.
        # (Debug print statements from the original draft removed.)
        return torch.cat([self.maxpool_1(x), self.maxpool_2(x), self.avgpool_1(x)], dim=1)
# img = torch.FloatTensor(1,32,64,64)
# pool = spatial_pool((3,5,3),(1,1,1))
# print(pool(img).shape)
二、残差学习block
class residual_learning(nn.Module):
    """Basic residual block: conv-bn-relu -> conv-bn, identity shortcut,
    then a final ReLU (as in He et al., "Deep Residual Learning").

    The original version constructed ``self.relu`` but never called it,
    leaving the block with no non-linearity; that is fixed here.

    Args:
        input_dim: input channels.
        hidden_dim: channels of the intermediate feature map.
        output_dim: output channels — must equal ``input_dim`` for the
            shortcut addition to be valid.
        kernel_size: two kernel sizes, one per conv (odd values with
            stride 1 preserve the spatial size).
        strdie: two strides, one per conv. Misspelled name kept so
            existing keyword callers keep working.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, kernel_size, strdie):
        super(residual_learning, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, kernel_size[0], strdie[0], padding=kernel_size[0] // 2)
        self.bn1 = nn.BatchNorm2d(hidden_dim)
        self.conv2 = nn.Conv2d(hidden_dim, output_dim, kernel_size[1], strdie[1], padding=kernel_size[1] // 2)
        self.bn2 = nn.BatchNorm2d(output_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        y = self.relu(self.bn1(self.conv1(x)))  # activation was missing here
        y = self.bn2(self.conv2(y))
        # Shortcut add, then the final activation (relu was unused before).
        return self.relu(torch.add(x, y))
# img = torch.FloatTensor(1,32,64,64)
# residual = residual_learning(32,64,32,[3,3],[1,1])
# print(residual(img).shape)
三、FPN
class fpn(nn.Module):
    """Three-level feature pyramid. A stride-2 conv backbone builds the
    bottom-up features; the top-down path upsamples each coarser level and
    adds it to a 1x1-projected lateral feature.

    forward() returns a list [coarsest, middle, finest]; every level has
    ``output_dim`` channels.
    """

    def __init__(self, input_dim, hidden_1, hidden_2, output_dim):
        super(fpn, self).__init__()
        # Bottom-up pathway: each conv halves the spatial resolution.
        self.conv1 = nn.Conv2d(input_dim, hidden_1, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(hidden_1, hidden_2, kernel_size=3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(hidden_2, output_dim, kernel_size=3, stride=2, padding=1)
        # NOTE: attribute name is a typo for "upsample"; kept as-is so
        # state_dict keys of existing checkpoints stay valid.
        self.upsampel = nn.Upsample(scale_factor=2, mode='nearest')
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm2d(hidden_1)
        self.bn2 = nn.BatchNorm2d(hidden_2)
        self.bn3 = nn.BatchNorm2d(output_dim)
        # Lateral 1x1 convs project bottom-up features to output_dim.
        self.conv1x1_1 = nn.Conv2d(hidden_2, output_dim, kernel_size=1, stride=1)
        self.conv1x1_2 = nn.Conv2d(hidden_1, output_dim, kernel_size=1, stride=1)

    def forward(self, x):
        # Bottom-up features c1 (finest) .. c3 (coarsest).
        c1 = self.relu(self.bn1(self.conv1(x)))
        c2 = self.relu(self.bn2(self.conv2(c1)))
        c3 = self.relu(self.bn3(self.conv3(c2)))
        # Top-down pathway with lateral connections.
        p3 = c3
        p2 = self.conv1x1_1(c2) + self.upsampel(p3)
        p1 = self.conv1x1_2(c1) + self.upsampel(p2)
        return [p3, p2, p1]
# Smoke test: push a dummy batch through the FPN and report each level's shape.
dummy = torch.FloatTensor(1, 32, 64, 64)
pyramid = fpn(32, 32, 64, 64)(dummy)
print(pyramid[0].shape, pyramid[1].shape, pyramid[2].shape)
总结
学无止境,希望通过博客将自己的一步步脚印记录下来,加油学习人!