Saving, Loading, and Running Inference with PyTorch Models

1. Saving and loading PyTorch models

1.1 Saving a simple model and its parameters on a single GPU

# Save the entire network
torch.save(net, PATH)
model = torch.load(PATH)

# Save only the network's parameters (faster and smaller on disk)
torch.save(net.state_dict(), PATH)

# First build the base model
model = enetv2(baseline_name, out_dim=1)
model.to(device)
# Then load the saved parameters into it
model.load_state_dict(torch.load(PATH))
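
As a self-contained illustration (my own sketch, using a toy nn.Linear in place of the enetv2 network used below), the full state_dict round trip looks like this:

import torch
import torch.nn as nn

net = nn.Linear(10, 1)                        # toy model standing in for the real network
torch.save(net.state_dict(), 'toy.pt')        # save parameters only

model = nn.Linear(10, 1)                      # rebuild the same architecture
model.load_state_dict(torch.load('toy.pt'))   # load the saved parameters back in
model.eval()                                  # switch to eval mode before inference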
  • Example
fold = 0
for train_index, test_index in skf.split(X, Y):
    best_roc_auc = 0  # best validation AUC seen so far for this fold

    model = enetv2(baseline_name, out_dim=1)
    model.to(device)
    """For multi-GPU data-parallel training, use instead:
    os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.to(device)
    """

    train_images, valid_images = X[train_index], X[test_index]
    train_targets, valid_targets = Y[train_index], Y[test_index]

    train_dataset = ClassificationDataset(image_paths=train_images, targets=train_targets, tr=ttransform)
    valid_dataset = ClassificationDataset(image_paths=valid_images, targets=valid_targets, tr=vtransform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=Batch_Size,shuffle=True, num_workers=4)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=Batch_Size,shuffle=False, num_workers=4)

    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
#     optimizer = optim.AdamW(model.parameters(), weight_decay=1e-02)
#     Scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3,epochs=20, pct_start=0.1,
#                                                    anneal_strategy='cos',div_factor=1e+3, final_div_factor=1e+3, steps_per_epoch=len(train_loader))
    

    for epoch in range(epochs):
        train(train_loader, model, optimizer, device=device)
        predictions, valid_targets = evaluate(valid_loader, model, device=device)
        roc_auc = metrics.roc_auc_score(valid_targets, predictions)
        print(f"Epoch={epoch}, Valid ROC AUC={roc_auc}")
        
        if roc_auc > best_roc_auc:
            torch.save(model.state_dict(), baseline_name + '_' + str(fold) + '.pt')
            best_roc_auc = roc_auc
    
    fold += 1
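
The train() and evaluate() helpers used above are not shown in this post. A minimal sketch of evaluate(), assuming the loader yields (image, target) batches and the model returns one raw logit per sample:

def evaluate(loader, model, device):
    # Run the model over a DataLoader and collect raw outputs and targets.
    model.eval()
    predictions, targets = [], []
    with torch.no_grad():
        for images, labels in loader:
            outputs = model(images.to(device))
            predictions.extend(outputs.detach().cpu().numpy().tolist())
            targets.extend(labels.numpy().tolist())
    return predictions, targets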

1.2 Saving additional information

# Save additional information, such as the optimizer state
torch.save({'epoch': epochID + 1,
            'state_dict': model.state_dict(),
            'best_loss': lossMIN,
            'optimizer': optimizer.state_dict(),
            'alpha': loss.alpha,
            'gamma': loss.gamma},
           checkpoint_path + '/m-' + launchTimestamp + '-' + str("%.4f" % lossMIN) + '.pth.tar')

def load_checkpoint(model, checkpoint_PATH, optimizer):
    if checkpoint_PATH is not None:
        model_CKPT = torch.load(checkpoint_PATH)
        model.load_state_dict(model_CKPT['state_dict'])
        print('loading checkpoint!')
        optimizer.load_state_dict(model_CKPT['optimizer'])
    return model, optimizer
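
To resume training from such a checkpoint, a minimal sketch (the checkpoint_file path is hypothetical, and the model/optimizer setup mirrors the example above):

model = enetv2(baseline_name, out_dim=1).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

checkpoint = torch.load(checkpoint_file, map_location=device)  # checkpoint_file: hypothetical path
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']    # continue counting epochs from where training stopped
best_loss = checkpoint['best_loss']  # restore the best validation loss seen so far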

1.3 Saving a model trained on multiple GPUs

Note: when the model is wrapped in nn.DataParallel, the underlying network is accessed through .module, so save model.module.state_dict():

torch.save(model.module.state_dict(), model_out_path)
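
A small helper (my own sketch, not from the original training code) that works whether or not the model is wrapped in nn.DataParallel:

def save_weights(model, path):
    # Unwrap nn.DataParallel if present so the saved keys carry no 'module.' prefix.
    state_dict = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
    torch.save(state_dict, path)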

1.4 Loading a model onto multiple GPUs

If the model was trained and saved using data parallelism across multiple GPUs, it is loaded the same way on multiple GPUs: wrap the freshly built model in nn.DataParallel and then load the weights.

model = enetv2(baseline_name, out_dim=1)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
    model.to(device)
    model.module.load_state_dict(torch.load("efficientnet-b4_0.pt"))

This can also be written more compactly:

model = enetv2(baseline_name, out_dim=1)
model = nn.DataParallel(model).to(device)
model.module.load_state_dict(torch.load("efficientnet-b4_0.pt"))

1.5 Single-GPU inference with a model saved from multi-GPU training

The weights themselves are independent of the GPU setup; data parallelism only affects how they were trained, so loading them for inference does not require multiple GPUs.

device = "cuda"
baseline_name = 'efficientnet-b4'
model = enetv2(baseline_name, out_dim=1)
model.load_state_dict(torch.load('efficientnet-b4_V1.pt', map_location='cuda:0'))  # single GPU
model.to(device)
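
This works because the weights were saved via model.module.state_dict(), so the keys carry no 'module.' prefix. If a checkpoint was instead saved with model.state_dict() on a DataParallel model, a sketch for stripping the prefix before loading it on a single GPU (the file name here is hypothetical):

state_dict = torch.load('dataparallel_checkpoint.pt', map_location='cuda:0')
# Drop the 'module.' prefix that nn.DataParallel adds to every parameter name.
state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
model.load_state_dict(state_dict)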

2. Loading PyTorch models for inference

2.1 Loading a single model

"""1.导入模型"""
device = "cuda"
baseline_name = 'efficientnet-b4'
model = enetv2(baseline_name, out_dim=1)
#将基础模型放入到device中,加速模型计算速度
model.to(device)
#然后导入模型的参数
model.load_state_dict(torch.load('./B4/efficientnet-b4_1.pt'))

"""2.读取测试数据"""
submission=pd.read_csv('../input/seti-breakthrough-listen/sample_submission.csv')
submission['img_path']=submission['id'].apply(lambda x:f'../input/seti-breakthrough-listen/test/{x[0]}/{x}.npy')

"""3.测试数据预处理"""
test_dataset = ClassificationDataset(image_paths=submission.img_path.values, targets=submission.target.values)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=16, shuffle=False, num_workers=4)

"""4.推理"""
test_predictions, test_targets = evaluate(test_loader, model, device=device)
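
To turn these raw outputs into submission probabilities, the same post-processing used in section 2.2 below applies. A minimal sketch, assuming one logit per sample:

import numpy as np

probs = torch.sigmoid(torch.from_numpy(np.array(test_predictions)[:, 0])).numpy()
submission.target = probs
submission.drop(['img_path'], axis=1, inplace=True)
submission.to_csv('submission.csv', index=False)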

2.2 Multi-model ensembling

The ./B4 directory contains three saved weight files: efficientnet-b4_0.pt, efficientnet-b4_1.pt, and efficientnet-b4_2.pt.

MODEL_PATH = './B4'
baseline_name = 'efficientnet-b4'
model = enetv2(baseline_name, out_dim=1)
model.to(device)

submission=pd.read_csv('../input/seti-breakthrough-listen/sample_submission.csv')
submission['img_path']=submission['id'].apply(lambda x:f'../input/seti-breakthrough-listen/test/{x[0]}/{x}.npy')

test_dataset=ClassificationDataset(image_paths=submission.img_path.values, targets=submission.target.values)
test_loader=torch.utils.data.DataLoader(test_dataset, batch_size=16,shuffle=False,num_workers=4)
sig=torch.nn.Sigmoid()
outs=[]

for i in range(3):
    model.load_state_dict(torch.load(f'{MODEL_PATH}/efficientnet-b4_{i}.pt'))
    predictions,valid_targets=evaluate(test_loader, model, device=device)
    predictions=np.array(predictions)[:,0]
    out=sig(torch.from_numpy(predictions))
    out=out.detach().numpy()
    outs.append(out)

pred=np.mean(np.array(outs),axis=0)
submission.target=pred
submission.drop(['img_path'],axis=1,inplace=True)
submission.to_csv('submission_fusion.csv', index=False)

3. Loading an EfficientNet model in TensorFlow

import efficientnet.tfkeras as efn

MODEL_PATH = '/kaggle/input/k/daicongxmu/siim-covid19-efnb7-train-fold0-5-2class'

test_paths = image_df.image_path.tolist()
image_df['none'] = 0
label_cols = ['none']

test_decoder = build_decoder(with_labels=False,
                             target_size=(IMAGE_DIMS[0],
                                          IMAGE_DIMS[0]), ext='png')
test_dataset = build_dataset(
    test_paths, bsize=BATCH_SIZE, repeat=False, 
    shuffle=False, augment=False, cache=False,
    decode_fn=test_decoder
)

with tf.device('/device:GPU:0'):
    models = []
    models0 = tf.keras.models.load_model(f'{MODEL_PATH}/model0.h5')
    models1 = tf.keras.models.load_model(f'{MODEL_PATH}/model1.h5')
    models2 = tf.keras.models.load_model(f'{MODEL_PATH}/model2.h5')
    models3 = tf.keras.models.load_model(f'{MODEL_PATH}/model3.h5')
    models4 = tf.keras.models.load_model(f'{MODEL_PATH}/model4.h5')
    models.append(models0)
    models.append(models1)
    models.append(models2)
    models.append(models3)
    models.append(models4)

image_df[label_cols] = sum([model.predict(test_dataset, verbose=1) for model in models]) / len(models)

del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()