1. PyTorch 模型导出为 TorchScript 的 .pt 模型
"""Export a .pth model to TorchScript format."""
import time
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
from model.model import parsingNet
def main():
    """Export the trained lane-detection checkpoint to a TorchScript .pt file.

    Loads ./model/ep099.pth, traces the network with a dummy (1, 3, 288, 800)
    CUDA input, and saves the traced module to ./model/best.pt.
    """
    # TODO(review): the original file had a placeholder string here
    # ("测试代码中调用模型的代码") instead of real code — supply the exact
    # parsingNet(...) constructor arguments used during training.
    net = parsingNet().cuda()  # model must be on the same device as the example input

    state_dict = torch.load("./model/ep099.pth", map_location='cpu')['model']

    # Checkpoints saved from nn.DataParallel prefix every key with "module.";
    # strip that prefix so the keys match a plain (non-parallel) network.
    compatible_state_dict = {
        (k[len('module.'):] if k.startswith('module.') else k): v
        for k, v in state_dict.items()
    }
    net.load_state_dict(compatible_state_dict, strict=False)
    net.eval()

    # An example input you would normally provide to your model's forward() method.
    example = torch.rand(1, 3, 288, 800).cuda()

    # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
    traced_script_module = torch.jit.trace(net, example)

    # Sanity-check the traced module on a fresh input, then save it.
    output = traced_script_module(torch.ones(1, 3, 288, 800).cuda())
    traced_script_module.save("./model/best.pt")

    # The traced ScriptModule can now be evaluated identically to a regular
    # PyTorch module.
    print(output)


if __name__ == "__main__":
    main()
2. VS2019 下配置 libtorch
注意：libtorch 版本必须与训练模型所用的 PyTorch 版本保持一致。
3. 使用 C++ 调用 PyTorch 模型
#include <torch/script.h>

#include <cstdlib>
#include <iostream>
#include <vector>
int main(void)
{
torch::jit::script::Module module = torch::jit::load("best.pt");
assert(module != nullptr);
std::cout << "Model is loaded!" << std::endl;
// Create a vector of inputs.
std::vector<torch::jit::IValue> inputs;
inputs.push_back(torch::ones({ 1, 3, 288, 800 }).cuda());
// Execute the model and turn its output into a tensor.
at::Tensor result = module.forward(inputs).toTensor();
std::cout << result << std::endl;
system("pause");
return 0;
}