The residual (Bottleneck) block of YOLOv5 can be implemented as follows:

import torch
import cv2
import numpy as np
import torch.nn as nn

# 1. Load the input image and convert it to an NCHW float tensor
image = cv2.imread(r"D:\AI\data\red_green_light\4.jpg")    # BGR image, shape (H, W, 3)
images = np.expand_dims(image.transpose(2, 0, 1), axis=0)  # HWC -> CHW, then add the batch dimension
input = torch.tensor(images, dtype=torch.float32)          # float tensor of shape (1, 3, H, W)

# 2. SiLU activation function
class SiLU(nn.Module):
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)

def autopad(k, p=None):
    # if no padding is given, pad by k // 2 so a stride-1 convolution keeps the spatial size
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
    return p

# 3. Convolution block
class Conv(nn.Module):
    def __init__(self, c1=3, c2=3, k=1, s=1, p=None, g=1, act=True):
        super(Conv, self).__init__()
        # convolution, batch normalization and activation
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)  # convolution
        self.bn = nn.BatchNorm2d(c2, eps=0.001, momentum=0.03)                    # batch normalization
        self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())  # activation

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))  # conv -> BatchNorm2d -> activation -> output

    def fuseforward(self, x):
        return self.act(self.conv(x))  # this forward path has no BN step

# 4. Residual block
# Note: c1 and c2 are given fixed defaults here; c1 is the number of input image channels, and c2 is set equal to c1
class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1=3, c2=3, shortcut=True, g=1, e=0.5):
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        # two Conv blocks are used below, each made of convolution, batch normalization and activation
        self.cv1 = Conv(c1, c_, 1, 1)       # c1 -> c_ channels, 1x1 kernel, stride 1, padding 0 (spatial size unchanged)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)  # c_ -> c2 channels, 3x3 kernel, stride 1, autopad gives padding 1 (spatial size unchanged)
        self.add = shortcut and c1 == c2    # the shortcut is only used when c1 == c2

    def forward(self, x):
        # self.add decides whether the shortcut is applied
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))

# 5. Output
# plain convolution block
hh = Conv()
output2 = hh(input)
# residual (Bottleneck) block
ss = Bottleneck()
output = ss(input)
print("input x:", input)
print("Conv output:", output2)
print("Bottleneck output:", output)
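
The hand-written SiLU above computes x * sigmoid(x), which is the same function PyTorch exposes as torch.nn.functional.silu. A minimal sketch to confirm the equivalence; the random input shape is an arbitrary choice for illustration:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)                 # arbitrary test tensor
y_manual = x * torch.sigmoid(x)             # the SiLU defined in the script above
y_builtin = F.silu(x)                       # PyTorch's built-in SiLU
print(torch.allclose(y_manual, y_builtin))  # True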
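
Because autopad returns k // 2 when no padding is given, a stride-1 Conv keeps the spatial resolution of its input regardless of kernel size. A small sanity check reusing the Conv class defined above; the 1x3x64x64 input shape and the 8 output channels are arbitrary choices for the demo:

import torch

x = torch.randn(1, 3, 64, 64)   # random NCHW input, size chosen only for the demo
conv1 = Conv(3, 3, k=1, s=1)    # 1x1 kernel, autopad(1) == 0
conv3 = Conv(3, 8, k=3, s=1)    # 3x3 kernel, autopad(3) == 1 ("same" padding)
print(conv1(x).shape)           # torch.Size([1, 3, 64, 64])
print(conv3(x).shape)           # torch.Size([1, 8, 64, 64])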
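
The residual connection y = x + cv2(cv1(x)) is only taken when shortcut is requested and c1 == c2; otherwise the block falls back to the plain cv2(cv1(x)) path. A sketch of both cases, again with arbitrarily chosen input shape and channel counts:

import torch

x = torch.randn(1, 3, 64, 64)

b_res = Bottleneck(c1=3, c2=3, shortcut=True)     # channels match -> residual path is used
print(b_res.add)                                  # True
b_plain = Bottleneck(c1=3, c2=16, shortcut=True)  # channels differ -> shortcut is dropped
print(b_plain.add)                                # False

# Both variants preserve the spatial size and output c2 channels
print(b_res(x).shape)    # torch.Size([1, 3, 64, 64])
print(b_plain(x).shape)  # torch.Size([1, 16, 64, 64])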