win10 c++调用pytorch模型

1.pytorch模型生成pt模型

"""Export a pth model to TorchScript formats


import time
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
from model.model import parsingNet



def main():
    """Trace the lane-detection network and save it as a TorchScript .pt file
    that can be loaded from C++ (libtorch).
    """
    # Build the network exactly as in the test/eval code.
    # NOTE(review): the original post used a placeholder string here; the
    # constructor arguments must match the ones used during training --
    # confirm against the training script.
    net = parsingNet()

    state_dict = torch.load("./model/ep099.pth", map_location='cpu')['model']

    # Checkpoints saved through nn.DataParallel prefix every key with
    # 'module.'; strip that prefix so the keys match a plain module.
    # (The original code referenced `compatible_state_dict` without ever
    # defining it, which raised a NameError.)
    compatible_state_dict = {}
    for k, v in state_dict.items():
        compatible_state_dict[k[7:] if k.startswith('module.') else k] = v

    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()
    # The example input below lives on the GPU, so the model must too;
    # torch.jit.trace requires model and input on the same device.
    net.cuda()

    # An example input you would normally provide to the model's forward().
    example = torch.rand(1, 3, 288, 800).cuda()

    # torch.jit.trace records the ops executed on the example input and
    # produces a ScriptModule that libtorch can load.
    traced_script_module = torch.jit.trace(net, example)
    output = traced_script_module(torch.ones(1, 3, 288, 800).cuda())
    traced_script_module.save("./model/best.pt")

    # The traced ScriptModule evaluates like a regular PyTorch module.
    print(output)


if __name__ == "__main__":
    main()

2. vs2019下配置libtorch

注意libtorch版本和训练模型的pytorch版本一致

3. 使用c++调用pytorch模型

#include <torch/script.h>
#include <iostream>

int main(void)
{
	
	torch::jit::script::Module module = torch::jit::load("best.pt");

	assert(module != nullptr);

	std::cout << "Model is loaded!" << std::endl;
	// Create a vector of inputs.
	std::vector<torch::jit::IValue> inputs;
	inputs.push_back(torch::ones({ 1, 3, 288, 800 }).cuda());

	// Execute the model and turn its output into a tensor.
	at::Tensor result = module.forward(inputs).toTensor();

	std::cout << result << std::endl;

	system("pause");

	return 0;
}

评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值