This is a lightly modified version of the Hefei ECG competition's first-place code (https://github.com/zhengqi98/Hefei_ECG_TOP1); rerun with a different random seed, it achieved a top-5 score.
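For the listing below to run standalone it needs the usual PyTorch imports; `Config` (which supplies `label_size`) comes from the repo's own configuration module, so the import path here is only a guess:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Project-specific: provides Config.label_size (import path assumed).
# from config import Config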
class GeM2D(nn.Module):
    """Generalized-mean (GeM) pooling with a learnable exponent p."""
def __init__(self, kernel_size=(1,2), p=3, eps=1e-6):
super(GeM2D,self).__init__()
self.p = nn.Parameter(torch.ones(1)*p)
self.eps = eps
self.kernel_size = kernel_size
def forward(self, x):
return self.gem(x, p=self.p, eps=self.eps)
    def gem(self, x, p=3, eps=1e-6):
        # GeM: clamp, raise to p, average-pool, take the p-th root. avg_pool2d is
        # the correct op here; with max_pool2d the clamp/pow would cancel out and
        # the layer would degenerate to plain max pooling.
        return F.avg_pool2d(x.clamp(min=eps).pow(p), self.kernel_size).pow(1. / p)
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'
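For reference, a minimal sketch of the pooling behavior (shapes are illustrative, not from the training script; the learnable p interpolates between average pooling at p=1 and max pooling as p grows):

pool = GeM2D(kernel_size=(1, 2))           # halves the time axis
feat = torch.randn(4, 16, 8, 306).abs()    # dummy (batch, channels, leads, time)
out = pool(feat)
print(out.shape)                           # torch.Size([4, 16, 8, 153])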
class SiLU(torch.nn.Module):
    """SiLU/Swish activation, x * sigmoid(x); equivalent to nn.SiLU in PyTorch >= 1.7."""
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)
def conv_2d(in_planes, out_planes, stride=(1,1), size=3):
    """1 x `size` 2D convolution with padding; slides along the time axis only."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=(1,size), stride=stride,
                     padding=(0,(size-1)//2), bias=False)
def conv_1d(in_planes, out_planes, stride=1, size=3):
    """`size`-wide 1D convolution with padding."""
    return nn.Conv1d(in_planes, out_planes, kernel_size=size, stride=stride,
                     padding=(size-1)//2, bias=False)
class BasicBlock1d(nn.Module):
    """Pre-activation residual block over 1D sequences: (BN -> ReLU -> Conv) x 3."""
    expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, size=3, res=True):
super(BasicBlock1d, self).__init__()
self.conv1 = conv_1d(inplanes, planes, stride, size=size)
self.bn1 = nn.BatchNorm1d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv_1d(planes, planes, size=size)
self.bn2 = nn.BatchNorm1d(planes)
self.conv3 = conv_1d(planes, planes, size=size)
self.bn3 = nn.BatchNorm1d(planes)
        self.dropout = nn.Dropout(.2)  # defined but not applied in forward, as in the original
self.downsample = downsample
self.stride = stride
self.res = res
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if self.res:
if self.downsample is not None:
residual = self.downsample(x)
out += residual
#out = self.relu(out)
return out
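A quick shape check of the block, with a hand-built strided shortcut of the same form that `_make_layer1d` produces below; the sizes here are placeholders, not values from the training script:

shortcut = nn.Sequential(
    nn.Conv1d(128, 128, kernel_size=1, stride=2, bias=False),
    nn.BatchNorm1d(128),
)
block = BasicBlock1d(128, 128, stride=2, downsample=shortcut, size=3)
x = torch.randn(4, 128, 306)
print(block(x).shape)                      # torch.Size([4, 128, 153])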
class BasicBlock2d(nn.Module):
    """Pre-activation residual block with (1, k) kernels over 2D (lead x time) maps."""
    expansion = 1
def __init__(self, inplanes, planes, stride=(1,1), downsample=None, size=3, res=True):
super(BasicBlock2d, self).__init__()
self.conv1 = conv_2d(inplanes, planes, stride, size=size)
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv_2d(planes, planes, size=size)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv_2d(planes, planes, size=size)
self.bn3 = nn.BatchNorm2d(planes)
        self.dropout = nn.Dropout(.2)  # defined but not applied in forward, as in the original
self.downsample = downsample
self.stride = stride
self.res = res
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if self.res:
if self.downsample is not None:
residual = self.downsample(x)
out += residual
#out = self.relu(out)
return out
class CNN(nn.Module):
    def __init__(self, input_channel=1, num_classes=18):
        super(CNN, self).__init__()
        # One branch per kernel-size family; each row lists the kernel size used
        # by that branch's six layer groups.
        sizes = [
            [3,3,3,3,3,3],
            [5,5,5,5,5,5],
            [7,7,7,7,7,7],
            #[11,11,11,11,11,11]
        ]
        self.sizes = sizes
        # Residual-block counts for each layer group of each branch.
        layers = [
            [3,3,2,2,2,2],
            [3,2,2,2,2,2],
            [2,2,2,2,2,2],
            #[1,2,2,2,2,2],
        ]
        self.inplanes = 16
        self.relu = SiLU()
        #self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(input_channel, 16, kernel_size=(1,50), stride=(1,2), padding=(0,0),
bias=False)
self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, self.inplanes, kernel_size=(1,16), stride=(1,2), padding=(0,0),
                               bias=False)
        self.bn2 = nn.BatchNorm2d(self.inplanes)  # defined but not applied in forward
        self.maxpool = nn.MaxPool2d(kernel_size=(1,2), stride=(1,2), padding=(0,0))  # unused in forward
        self.avgpool = nn.AvgPool2d(kernel_size=(1,2), stride=(1,2), padding=(0,0))
        #self.maxpool = GeM2D(kernel_size=(1,2))
self.globalavgpool = nn.AdaptiveAvgPool1d(1)
self.globalmaxpool = nn.AdaptiveMaxPool1d(1)
self.dropout = nn.Dropout(.2)
self.layers1_list = nn.ModuleList()
self.layers2_list = nn.ModuleList()
        for i in range(len(sizes)):
            self.inplanes = 16
            self.layers1 = nn.Sequential()
            self.layers2 = nn.Sequential()
            # 2D stage: (1, k) kernels convolve along time only, keeping leads separate.
            # (Block-count index [1] is reused for groups _1_2 and _1_3, as in the original.)
            self.layers1.add_module('layer{}_1_1'.format(i), self._make_layer2d(BasicBlock2d, self.inplanes, layers[i][0], stride=(1,1), size=sizes[i][0]))
            self.layers1.add_module('layer{}_1_2'.format(i), self._make_layer2d(BasicBlock2d, self.inplanes, layers[i][1], stride=(1,1), size=sizes[i][1]))
            self.layers1.add_module('layer{}_1_3'.format(i), self._make_layer2d(BasicBlock2d, self.inplanes, layers[i][1], stride=(1,1), size=sizes[i][1]))
            # The forward pass flattens channels and leads together before the 1D
            # stage (16 channels x 8 leads = 128), hence the factor of 8; the lead
            # axis must have size 8 for the classifier dimensions to line up.
            self.inplanes *= 8
            self.layers2.add_module('layer{}_2_1'.format(i), self._make_layer1d(BasicBlock1d, self.inplanes, layers[i][2], stride=2, size=sizes[i][2]))
            self.layers2.add_module('layer{}_2_2'.format(i), self._make_layer1d(BasicBlock1d, self.inplanes, layers[i][3], stride=2, size=sizes[i][3]))
            self.layers2.add_module('layer{}_2_3'.format(i), self._make_layer1d(BasicBlock1d, self.inplanes, layers[i][4], stride=2, size=sizes[i][4]))
            self.layers2.add_module('layer{}_2_4'.format(i), self._make_layer1d(BasicBlock1d, self.inplanes, layers[i][5], stride=2, size=sizes[i][5]))
            self.layers2.add_module('layer{}_2_5'.format(i), self._make_layer1d(BasicBlock1d, self.inplanes, layers[i][5], stride=2, size=sizes[i][5]))
            self.layers1_list.append(self.layers1)
            self.layers2_list.append(self.layers2)
        self.drop = nn.Dropout(p=0.2)
        # The num_classes argument is unused here; the output width comes from the
        # project's Config.label_size.
        self.fc = nn.Linear(self.inplanes * len(sizes), Config.label_size)
def _make_layer1d(self, block, planes, blocks, stride=2, size=3, res=True):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv1d(self.inplanes, planes * block.expansion,
kernel_size=1, padding=0, stride=stride, bias=False),
nn.BatchNorm1d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, size=size, res=res))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, size=size, res=res))
return nn.Sequential(*layers)
def _make_layer2d(self, block, planes, blocks, stride=(1,2), size=3, res=True):
downsample = None
if stride != (1,1) or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=(1,1), padding=(0,0), stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, size=size, res=res))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, size=size, res=res))
return nn.Sequential(*layers)
    def forward(self, x0):
        # x0: (batch, leads, time); add a channel axis -> (batch, 1, leads, time)
        x0 = x0.unsqueeze(1)
        x0 = self.conv1(x0)
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        x0 = self.avgpool(x0)
        x0 = self.conv2(x0)
        #x0 = self.bn2(x0)
        #x0 = self.relu(x0)
        x0 = self.avgpool(x0)
        x0 = self.dropout(x0)
        xs = []
        for i in range(len(self.sizes)):
            x = self.layers1_list[i](x0)
            # Merge channel and lead axes: (B, C, leads, T) -> (B, C*leads, T)
            x = torch.flatten(x, start_dim=1, end_dim=2)
            x = self.layers2_list[i](x)
            x = self.dropout(x)
            x = self.globalavgpool(x)
            xs.append(x)
        # Concatenate per-branch descriptors and classify.
        out = torch.cat(xs, dim=2)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
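A hedged smoke test, assuming 8-lead input (the flatten in forward requires a lead axis of size 8) at 5000 samples, with a placeholder Config standing in for the repo's real configuration:

class Config:                      # placeholder for the repo's config module
    label_size = 34                # assumed label count; adjust to your task

model = CNN()
dummy = torch.randn(2, 8, 5000)    # (batch, leads, samples)
logits = model(dummy)
print(logits.shape)                # torch.Size([2, 34])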