大赛网址:http://challenge.xfyun.cn/topic/info?type=PET
赛事说明及要求请查看上面网址。数据下载需要实名认证,这里提供[百度网盘下载地址] (提取码:4hz2)。
赛题数据
赛题数据分训练集和测试集。
- 训练集分CN和AD两种类别的图像,每个类别一个文件夹共1千张,因此训练集共2千张图像。
- 测试集有1千张图像,当然是没有标签的,需要我们训练模型预测是AD还是CN,结果以CSV形式提交。大赛网站提供了提交模板。
可以发现训练集中图像尺寸各不一样,为了清楚地了解图像中都有哪些尺寸,可使用如下代码进行遍历:
import cv2
import glob

# Enumerate every distinct (height, width) among the training images.
train_image_path = glob.glob(r"data\train\*\*.png")
sizeSet = set()
for path in train_image_path:
    image = cv2.imread(path)
    sizeSet.add(image.shape[:2])
print(sizeSet)
输出:
{(256, 256), (168, 168), (336, 336), (128, 128)}
因此,需要注意:赛题图像有四种大小:128*128、168*168、256*256、336*336。
0. 导入所需的库
# -*- coding: utf-8 -*-
import os, sys, glob, argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import time, datetime
import pdb, traceback
import cv2
# import imagehash
from PIL import Image
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
# from efficientnet_pytorch import EfficientNet
# model = EfficientNet.from_pretrained('efficientnet-b4')
import torch
torch.manual_seed(0)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
# Print each core dependency's version for reproducibility.
for lib in (pd, np, torch):
    print("{}: {}".format(lib.__name__, lib.__version__))
输出:
pandas: 0.24.2
numpy: 1.17.4
torch: 1.5.1+cu101
1. 训练代码:
# input dataset
# All training image paths; class folders under data\train are AD and CN,
# and the folder name is later used to derive the label.
train_jpg = np.array(glob.glob(r'data\train\*\*.png'))
class QRDataset(Dataset):
    """Dataset of scan images whose binary label comes from the file path.

    A path containing 'AD' yields label 1, otherwise label 0 (CN) — the
    training folders are named AD and CN.
    """

    def __init__(self, train_jpg, transform=None):
        # train_jpg: sequence/array of image file paths.
        self.train_jpg = train_jpg
        # None means "no transform"; the redundant if/else of the original
        # collapsed to a single assignment. The unused start_time line in
        # __getitem__ was removed.
        self.transform = transform

    def __getitem__(self, index):
        img = Image.open(self.train_jpg[index]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # 0-d int64 tensor label derived from the directory name in the path.
        return img, torch.from_numpy(np.array(int('AD' in self.train_jpg[index])))

    def __len__(self):
        return len(self.train_jpg)
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, the top-k accuracy in percent for each k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape (not view): `correct` is transposed and therefore
            # non-contiguous, and .view(-1) raises on non-contiguous tensors
            # in newer PyTorch releases.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record observation `val`, weighted as `n` samples."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. fmt=':6.2f' -> 'name {val:6.2f} ({avg:6.2f})'
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a '[batch/total]' header followed by each meter's summary."""

    def __init__(self, num_batches, *meters):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = ""

    def pr2int(self, batch):
        """Print one tab-separated progress line for the given batch index."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(m) for m in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running batch index to the width of the total count,
        # e.g. 180 batches -> '[{:3d}/180]'.
        width = len(str(num_batches))
        fmt = '{:%dd}' % width
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
class VisitNet(nn.Module):
    """ResNet-34 backbone with a two-way head for AD-vs-CN classification."""

    def __init__(self):
        super(VisitNet, self).__init__()
        # Start from ImageNet-pretrained weights, then swap in adaptive
        # pooling and a 2-class output layer.
        backbone = models.resnet34(True)
        backbone.avgpool = nn.AdaptiveAvgPool2d(1)
        backbone.fc = nn.Linear(512, 2)
        self.resnet = backbone

    def forward(self, img):
        return self.resnet(img)
def validate(val_loader, model, criterion):
    """Evaluate `model` on `val_loader` and return the top-1 accuracy meter.

    Moves batches to GPU, accumulates average loss plus top-1 / top-2
    accuracy over the whole validation set, and prints the summary.
    (Fixes: removed the unused ProgressMeter; the printed label now says
    Acc@2 — the metric computed is top-2, not top-5.)
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top2 = AverageMeter('Acc@2', ':6.2f')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            input = input.cuda()
            target = target.cuda().long()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc2 = accuracy(output, target, topk=(1, 2))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top2.update(acc2[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
        print(' * Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f}'
              .format(top1=top1, top2=top2))
    return top1
def predict(test_loader, model, tta=10):
    """Predict class scores for `test_loader`, summing `tta` stochastic passes.

    Test-time augmentation: the loader's random transforms make each pass
    differ. Returns the element-wise sum of the per-pass score matrices
    (shape: num_samples x num_classes).
    """
    # switch to evaluate mode
    model.eval()
    test_pred_tta = None
    for _ in range(tta):
        test_pred = []
        with torch.no_grad():
            for i, (input, target) in enumerate(test_loader):
                input = input.cuda()
                target = target.cuda()
                # Bug fix: the original called model(input, path) with an
                # undefined name `path`, raising NameError on first use.
                output = model(input)
                output = output.data.cpu().numpy()
                test_pred.append(output)
        test_pred = np.vstack(test_pred)
        if test_pred_tta is None:
            test_pred_tta = test_pred
        else:
            test_pred_tta += test_pred
    return test_pred_tta
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over `train_loader`, printing progress every 100 steps."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    progress = ProgressMeter(len(train_loader), batch_time, losses, top1)

    # switch to train mode
    model.train()

    tic = time.time()
    for step, (images, labels) in enumerate(train_loader):
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True).long()

        # forward pass and loss
        logits = model(images)
        loss = criterion(logits, labels)

        # record loss and top-1 accuracy (top-2 is computed but unused here)
        acc1, _ = accuracy(logits, labels, topk=(1, 2))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))

        # gradient step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # timing
        batch_time.update(time.time() - tic)
        tic = time.time()

        if step % 100 == 0:
            progress.pr2int(step)
# 10-fold cross-validation over the shuffled training paths. Note this is a
# plain KFold, not StratifiedKFold, so fold class balance is approximate.
skf = KFold(n_splits=10, random_state=233, shuffle=True)
for fold_idx, (train_idx, val_idx) in enumerate(skf.split(train_jpg, train_jpg)):
    # Training loader: heavy augmentation (affine + flips) at 512x512.
    train_loader = torch.utils.data.DataLoader(
        QRDataset(train_jpg[train_idx],
                  transforms.Compose([
                      transforms.Resize((512, 512)),
                      transforms.RandomAffine(10),
                      transforms.RandomHorizontalFlip(),
                      transforms.RandomVerticalFlip(),
                      transforms.ToTensor(),
                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                  ])
                  ), batch_size=10, shuffle=True, num_workers=0, pin_memory=True
    )
    # Validation loader: deterministic resize + normalize only.
    val_loader = torch.utils.data.DataLoader(
        QRDataset(train_jpg[val_idx],
                  transforms.Compose([
                      transforms.Resize((512, 512)),
                      transforms.ToTensor(),
                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                  ])
                  ), batch_size=10, shuffle=False, num_workers=0, pin_memory=True
    )

    model = VisitNet().cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), 0.01)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.85)
    best_acc = 0.0
    for epoch in range(10):
        print('Epoch: ', epoch)
        train(train_loader, model, criterion, optimizer, epoch)
        # Bug fix: scheduler.step() must be called AFTER the epoch's optimizer
        # steps (PyTorch >= 1.1); the original called it before train(), which
        # skipped the initial learning rate.
        scheduler.step()
        val_acc = validate(val_loader, model, criterion)
        if val_acc.avg.item() > best_acc:
            best_acc = val_acc.avg.item()
            # NOTE(review): the filename says resnet18 but the model is a
            # resnet34; kept as-is so the test script's checkpoint names match.
            torch.save(model.state_dict(), r'model_pytorch\resnet18_fold{0}.pt'.format(fold_idx))
    # NOTE(review): this break trains only fold 0, yet the test script loads
    # 10 fold checkpoints — remove it to train all folds. TODO confirm intent.
    break
输出:
Epoch: 0
[ 0/180] Time 11.350 (11.350) Loss 8.3745e-01 (8.3745e-01) Acc@1 50.00 ( 50.00)
[100/180] Time 0.358 ( 0.471) Loss 1.2588e+00 (8.5685e-01) Acc@1 50.00 ( 57.43)
* Acc@1 68.000 Acc@5 100.000
Epoch: 1
[ 0/180] Time 0.280 ( 0.280) Loss 2.0223e-01 (2.0223e-01) Acc@1 90.00 ( 90.00)
[100/180] Time 0.365 ( 0.365) Loss 1.0570e+00 (5.5575e-01) Acc@1 50.00 ( 74.06)
* Acc@1 49.500 Acc@5 100.000
Epoch: 2
[ 0/180] Time 0.271 ( 0.271) Loss 4.6693e-01 (4.6693e-01) Acc@1 80.00 ( 80.00)
[100/180] Time 0.364 ( 0.364) Loss 7.1321e-01 (4.3906e-01) Acc@1 70.00 ( 81.09)
* Acc@1 50.500 Acc@5 100.000
Epoch: 3
[ 0/180] Time 0.286 ( 0.286) Loss 9.2954e-01 (9.2954e-01) Acc@1 50.00 ( 50.00)
[100/180] Time 0.360 ( 0.367) Loss 4.1622e-01 (3.5760e-01) Acc@1 80.00 ( 84.55)
* Acc@1 79.000 Acc@5 100.000
Epoch: 4
[ 0/180] Time 0.261 ( 0.261) Loss 4.8178e-01 (4.8178e-01) Acc@1 70.00 ( 70.00)
[100/180] Time 0.363 ( 0.363) Loss 7.1621e-01 (3.2671e-01) Acc@1 60.00 ( 85.74)
* Acc@1 95.000 Acc@5 100.000
Epoch: 5
[ 0/180] Time 0.268 ( 0.268) Loss 3.6726e-01 (3.6726e-01) Acc@1 90.00 ( 90.00)
[100/180] Time 0.362 ( 0.367) Loss 1.1221e-01 (2.2348e-01) Acc@1 100.00 ( 90.79)
* Acc@1 92.000 Acc@5 100.000
Epoch: 6
[ 0/180] Time 0.257 ( 0.257) Loss 3.8822e-02 (3.8822e-02) Acc@1 100.00 (100.00)
[100/180] Time 0.365 ( 0.366) Loss 7.4892e-01 (2.6901e-01) Acc@1 80.00 ( 88.81)
* Acc@1 86.000 Acc@5 100.000
...
2. 测试代码:
# -*- coding: utf-8 -*-
import os, sys, glob, argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import time, datetime
import pdb, traceback
import cv2
# import imagehash
from PIL import Image
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
# from efficientnet_pytorch import EfficientNet
# model = EfficientNet.from_pretrained('efficientnet-b4')
import torch
torch.manual_seed(0)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset
class QRDataset(Dataset):
    """Image dataset whose binary label is derived from the file path.

    A path containing 'AD' yields label 1, otherwise 0 (CN). The unused
    `start_time = time.time()` line in __getitem__ was removed.
    """

    def __init__(self, train_jpg, transform=None):
        self.train_jpg = train_jpg    # sequence/array of image file paths
        self.transform = transform    # optional transform pipeline (or None)

    def __getitem__(self, index):
        path = self.train_jpg[index]
        img = Image.open(path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # 0-d int64 tensor label from the directory name in the path.
        return img, torch.from_numpy(np.array(int('AD' in path)))

    def __len__(self):
        return len(self.train_jpg)
class VisitNet(nn.Module):
    """ResNet-34 feature extractor with a two-class (AD / CN) output layer."""

    def __init__(self):
        super(VisitNet, self).__init__()
        net = models.resnet34(True)  # ImageNet-pretrained weights
        net.avgpool = nn.AdaptiveAvgPool2d(1)
        net.fc = nn.Linear(512, 2)
        self.resnet = net

    def forward(self, img):
        return self.resnet(img)
def predict(test_loader, model, tta=10):
    """Sum `tta` forward passes over the loader (test-time augmentation).

    The loader's random flips make each pass differ; the element-wise sum
    of the per-pass score matrices is returned as a numpy array.
    """
    model.eval()  # evaluation mode for prediction
    accumulated = None
    for _ in range(tta):
        batch_outputs = []
        with torch.no_grad():
            for i, (input, target) in enumerate(test_loader):
                input = input.cuda()
                target = target.cuda()
                # forward pass; move scores back to CPU numpy
                scores = model(input).data.cpu().numpy()
                batch_outputs.append(scores)
        stacked = np.vstack(batch_outputs)
        accumulated = stacked if accumulated is None else accumulated + stacked
    return accumulated
# Paths of the 1000 unlabeled test images (files named 1.png ... 1000.png).
test_jpg = [r'data\test\AD&CN\{0}.png'.format(x) for x in range(1, 1001)]
test_jpg = np.array(test_jpg)
# Ensemble: sum TTA predictions across all 10 fold checkpoints.
test_pred = None
for model_path in ['resnet18_fold0.pt', 'resnet18_fold1.pt', 'resnet18_fold2.pt',
                   'resnet18_fold3.pt', 'resnet18_fold4.pt', 'resnet18_fold5.pt',
                   'resnet18_fold6.pt', 'resnet18_fold7.pt', 'resnet18_fold8.pt',
                   #'resnet18_fold9.pt'][:1]:
                   'resnet18_fold9.pt']:
    # Random flips stay ON at test time — predict() relies on them so that
    # each of its TTA passes sees a different augmentation of the images.
    test_loader = torch.utils.data.DataLoader(
        QRDataset(test_jpg,
                  transforms.Compose([
                      transforms.Resize((512, 512)),
                      # transforms.CenterCrop((450, 450)),
                      transforms.RandomHorizontalFlip(),
                      transforms.RandomVerticalFlip(),
                      transforms.ToTensor(),
                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                  ])
                  #), batch_size=10, shuffle=False, num_workers=10, pin_memory=True
                  ), batch_size=10, shuffle=False, num_workers=0, pin_memory=True
    )
    model = VisitNet().cuda()
    # Load the fold checkpoint saved by the training script.
    model_path = os.path.join("model_pytorch", model_path)
    model.load_state_dict(torch.load(model_path))
    # model = nn.DataParallel(model).cuda()
    if test_pred is None:
        test_pred = predict(test_loader, model, 5)
    else:
        test_pred += predict(test_loader, model, 5)
# Class index 1 corresponds to 'AD' (QRDataset labels paths containing 'AD'
# as 1), index 0 to 'CN'; write the submission CSV.
test_csv = pd.DataFrame()
test_csv['uuid'] = list(range(1, 1001))
test_csv['label'] = np.argmax(test_pred, 1)
test_csv['label'] = test_csv['label'].map({1: 'AD', 0: 'CN'})
test_csv.to_csv('tmp.csv', index=None)
测试结果将保存到tmp.csv文件中,可以将此文件作为结果提交到大赛网站,如下图所示:
刚提交完的状态:
提交完刷新一下,很快就能出结果: