主干网络大概有 6 块结构。运行代码前需要修改输入图片的路径。注意:cv2 读入的图片是 HWC(高×宽×通道)排列,转成网络输入的 NCHW 时应当先 transpose 再增加 batch 维,而不是直接按 img.shape[0] 和 img.shape[1] 去 reshape(直接 reshape 会打乱像素顺序)。
代码如下:
#主函数CSPDarknet import torch import torch.nn as nn import cv2 #1.silu激活函数 class SiLU(nn.Module): @staticmethod def forward(x): return x*torch.sigmoid(x) def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] return p #2.构造卷积函数 class Conv(nn.Module): def __init__(self, c1=3, c2=3, k=1, s=1, p=None, g=1, act=True): super(Conv, self).__init__() # 卷积、标准化加激活函数 self.conv=nn.Conv2d(c1,c2,k,s,autopad(k,p),groups=g, bias=False) self.bn=nn.BatchNorm2d(c2,eps=0.001,momentum=0.03) self.act=SiLU() if act is True else (act if isinstance(act,nn.Module) else nn.Identity())#激活函数 def forward(self,x): return self.act(self.bn(self.conv(x)))#卷积->BatchNorm2d->激活函数->output def fuseforward(self, x): return self.act(self.conv(x)) # 这个forward函数不存在BN操作 #3.Focus函数: 对图片处理 640X640X3--->320X320X12------>320X320X64 class Focus(nn.Module): def __init__(self,c1=3, c2=64, k=1, s=1, p=None, g=1, act=True): super(Focus,self).__init__() # 通道数变为原来的4倍 self.conv=Conv(c1*4,c2,k,s,p,g,act) def forward(self,x): return self.conv( # torch.cat:按照维数对数组拼接,1表示横着拼接,需要行数相同,以下拼接后通道数乘以4,行数减半. 
torch.cat( [ x[..., ::2, ::2],# 640, 640, 3 => 320, 320, 12 x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2] ], 1 ) ) # 4.CSPLayer中的残差结构的定义 class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1=3, c2=3, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion super(Bottleneck, self).__init__() c_=int(c2*e) self.cv1=Conv(c1,c_,1,1) self.cv2=Conv(c_,c2,3,1,g=g) self.add=shortcut and c1==c2 def forward(self,x): return x+self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) #5.csplayer结构,用残差结构进行特征提取 class C3(nn.Module): def __init__(self, c1=3, c2=3, n=1, shortcut=True, g=1, e=0.5): super(C3,self).__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3=Conv(2*c_,c2,1) self.m=nn.Sequential(*[Bottleneck(c_,c_,shortcut,g,e=1.0) for _ in range(n)]) def forward(self,x): return self.cv3(torch.cat( ( self.m(self.cv1(x)), self.cv2(x) ) ,dim=1)) #6.SPP通过不同池化和大小的最大池化进行特征提取,提高网络的感受野 class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1=3, c2=3, k=(5, 9, 13)): super(SPP, self).__init__() c_=c1//2 self.cv1=Conv(c1,c_,1,1) self.cv2=Conv(c_*(len(k)+1),c2,1,1) self.m=nn.ModuleList([nn.MaxPool2d(kernel_size=x,stride=1,padding=x//2) for x in k]) def forward(self,x): x=self.cv1(x) #return self.cv2(torch.cat([x]+[m(x) for m in self.m],1)) return [m(x) for m in self.m] if __name__ == "__main__": #输入图片路径 image=cv2.imread(r"D:\AI\data\red_green_light\4.jpg") images=image.reshape(1,3,1080, 1920) data=torch.tensor(images) datas=torch.tensor(images,dtype=torch.float32) c1=3 c2=3 #1.输出激活函数处理数据的对比的结果 SL=SiLU() ss1=SL(data) print("1.激活函数-原矩阵:",(ss1-images).sum()) #对原矩阵的值的影响不大 #2.输出Conv卷积网络处理数据的对比结果 Cv=Conv() ss2=Cv(datas) #print("2.Conv函数-原矩阵:", ss2) print("2.Conv函数-原矩阵:", (ss2-ss1).sum()) #由于加入了归一化,所以原矩阵的值变小 #3.Focus网络处理数据结果 640X640X3--->320X320X12------>320X320X64 Fs=Focus() ss3=Fs(datas) #print("3.Focus函数", ss3) print("3.Focus函数: ", ss3.shape) 
#4.Bottleneck残差网络处理数据的对比结果 Bk=Bottleneck() ss4=Bk(datas) print("4.Bottleneck残差网络:", (ss4-ss1).sum()) #5.csplayer的C3网络结构处理数据的对比结果 C=C3() ss5=C(datas) print("5.csplayer的C3网络:", ss5.shape) #6.SPP网络结构处理数据的对比结果 SP=SPP() ss6=SP(datas) #print("6.SPP网络:", ss6.shape) print("6.SPP网络:", ss6)