1. 在windows系统下,需要修改BEFUnet-main\trainer.py第76行代码。
2. 修改 BEFUnet-main/test.py 第45行代码,添加 default=True;否则预测图不会被保存至指定文件夹。
3. 由于运行 train.py 时,每隔 eval_interval 个 epoch 都会保存模型并进行一次推理;为省去推理的开销,这里改为只保存模型。为此在 trainer.py 中修改了 trainer 函数:
def trainer(args, model, snapshot_path):
    """Train ``model`` on the Synapse dataset and periodically save checkpoints.

    Per-interval validation/inference was intentionally removed (see note #3
    at the top of this file); only model weights are saved every
    ``args.eval_interval`` epochs and at the final epoch.

    Args:
        args: namespace providing root_path, list_dir, img_size, base_lr,
            num_classes, batch_size, n_gpu, max_epochs, eval_interval,
            model_name and seed (exactly the attributes read below).
        model: network to train; assumed already on CUDA (inputs are moved
            with ``.cuda()`` below).
        snapshot_path: directory receiving the log file, TensorBoard events
            and ``.pth`` checkpoints.

    Returns:
        The literal string "Training Finished!".
    """
    # Filesystem-safe timestamp: str(datetime) contains ':' characters,
    # which are illegal in Windows filenames (cf. note #1 in this file).
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    log_file = os.path.join(snapshot_path, f"{args.model_name}{timestamp}_log.txt")
    logging.basicConfig(filename=log_file, level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size * args.n_gpu

    db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
                               transform=transforms.Compose(
                                   [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    # NOTE: the test-volume dataset/loader and the snapshot_path/test folder
    # were dropped together with the removed per-interval inference; they
    # were created but never used here.
    print("The length of train set is: {}".format(len(db_train)))

    def worker_init_fn(worker_id):
        # Deterministic per-worker seeding so augmentation is reproducible.
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=0,
                             pin_memory=True, worker_init_fn=worker_init_fn)

    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    model.train()

    ce_loss = CrossEntropyLoss()
    dice_loss = DiceLoss(num_classes)
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    writer = SummaryWriter(snapshot_path + '/log')

    def _save_checkpoint(epoch):
        # Single place for checkpoint naming/saving so the interval save and
        # the final-epoch save cannot drift apart (the original duplicated
        # these four lines and could save the last epoch twice).
        path = os.path.join(snapshot_path, f'{args.model_name}_epoch_{epoch}.pth')
        torch.save(model.state_dict(), path)
        logging.info("save model to {}".format(path))

    iter_num = 0
    max_epoch = args.max_epochs
    max_iterations = args.max_epochs * len(trainloader)
    logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations))
    iterator = tqdm(range(max_epoch), ncols=70)

    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
            # Replicate the single grayscale channel to the 3 channels the
            # backbone expects.
            B, C, H, W = image_batch.shape
            image_batch = image_batch.expand(B, 3, H, W)

            outputs = model(image_batch)
            loss_ce = ce_loss(outputs, label_batch.long())
            loss_dice = dice_loss(outputs, label_batch, softmax=True)
            loss = 0.4 * loss_ce + 0.6 * loss_dice

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Polynomial LR decay (power 0.9), updated every iteration.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_

            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            logging.info('iteration %d : loss : %f, loss_ce: %f loss_dice: %f'
                         % (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))

            if iter_num % 10 == 0:
                # Log sample 0. The original indexed sample 1 inside a bare
                # `except: pass`, which silently skipped all image logging
                # whenever the effective batch size was 1.
                try:
                    image = image_batch[0, 0:1, :, :]
                    image = (image - image.min()) / (image.max() - image.min())
                    writer.add_image('train/Image', image, iter_num)
                    preds = torch.argmax(torch.softmax(outputs, dim=1), dim=1, keepdim=True)
                    writer.add_image('train/Prediction', preds[0, ...] * 50, iter_num)
                    labs = label_batch[0, ...].unsqueeze(0) * 50
                    writer.add_image('train/GroundTruth', labs, iter_num)
                except Exception:
                    # Image logging is best-effort; record the failure instead
                    # of swallowing it silently.
                    logging.exception("TensorBoard image logging failed at iter %d", iter_num)

        # 仅保存模型,不运行验证集推理
        is_last_epoch = epoch_num >= max_epoch - 1
        if (epoch_num + 1) % args.eval_interval == 0 or is_last_epoch:
            _save_checkpoint(epoch_num)
        if is_last_epoch:
            iterator.close()
            break

    writer.close()
    return "Training Finished!"
4. 在训练中断后,可以加载已保存的模型权重作为预训练模型继续训练(模型默认加载的预训练权重是 Swin 和 PiDiNet)。为此在 /root/autodl-tmp/BEFUnet-main/train.py 文件中做如下修改。