Calling a PyTorch-trained model from C++ on Ubuntu 16.04, with OpenCV for data loading and prediction

Reference: the official tutorial at https://pytorch.org/tutorials/advanced/cpp_export.html
The model I use is a PyTorch regression model, trained in Python, that maps an image to a coordinate point (x, y).
The idea is to use the official libtorch library to call the Python-trained model from C++, which is convenient for production use.
The libtorch version matters: I downloaded the latest build on the official site at the time, 1.2.0.dev20190722. Code written against earlier versions can differ, and even some member functions are not the same.

Workflow

1. First convert the .pth model trained in Python into a .pt file
2. Download libtorch. You can either download the pre-built library from the official site or build it from source yourself. In my tests the pre-built package from the official site works fine.
3. Use it directly
4. Wrap it in a class for convenient use in a project

1. Converting the Python-trained model into a .pt file

Nothing else needs to be installed; the code is below.

import torch
import torchvision

# An instance of your model.
model = torchvision.models.resnet18()

# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)

# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("traced_resnet_model.pt")
# Run the traced model on an all-ones input so the result can be compared with the C++ version
output = traced_script_module(torch.ones(1, 3, 224, 224))
print(output)

2. Installing libtorch

Here cu90 stands for CUDA 9.0; for a CPU-only build replace it with cpu.

wget https://download.pytorch.org/libtorch/nightly/cu90/libtorch-shared-with-deps-latest.zip
unzip libtorch-shared-with-deps-latest.zip

Unzipping produces a libtorch folder, which is the pre-built library.

3. Usage

My IDE is Qt Creator (Qt 5.10.1). Both a CMake and a qmake build are shown below; I have tested both and they work.

3.1 Using CMake

First, check that libtorch works on its own.
Create a new project and choose CMake.
Edit CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)

project(dcgan)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

set(Torch_DIR /home/xbw/libtorch/share/cmake/Torch)
find_package(Torch REQUIRED)

add_executable(${PROJECT_NAME} "main.cpp")

target_link_libraries(dcgan "${TORCH_LIBRARIES}")
set_property(TARGET dcgan PROPERTY CXX_STANDARD 11)

Edit main.cpp:

#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
using namespace std;
int main() {
    torch::jit::script::Module module;
    // Remember to change the model path to your own
    module = torch::jit::load("/home/xxx/siamese/model/pytorch/traced_resnet_model.pt");
    module.to(at::kCUDA);
//    torch::Tensor tensor = torch::eye(3);
    // Create a vector of inputs.
    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(torch::ones({1, 3, 224, 224}).to(at::kCUDA));

    at::Tensor output = module.forward(inputs).toTensor();

    cout << output.slice(1,0,2) << endl;
    return 0;
}

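If the program aborts here on a machine without a visible CUDA device, you can pick the device at runtime instead of hard-coding at::kCUDA. A minimal sketch using torch::cuda::is_available():

// Optional: fall back to the CPU when no CUDA device is available.
torch::Device device = torch::cuda::is_available() ? torch::kCUDA : torch::kCPU;
module.to(device);
inputs.push_back(torch::ones({1, 3, 224, 224}).to(device));
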
Build and run; the output matches the Python result, which means the setup is working.
Next, test it together with OpenCV.
Edit CMakeLists.txt:

cmake_minimum_required(VERSION 2.8)

project(dcgan2)

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

set(Torch_DIR /home/xbw/libtorch/share/cmake/Torch)
find_package(Torch REQUIRED)

add_executable(${PROJECT_NAME} "main.cpp")


find_package(OpenCV  REQUIRED)

include_directories(${OpenCV_INCLUDE_DIRS} ${TORCH_INCLUDE_DIRS})

target_link_libraries(dcgan2 "${TORCH_LIBRARIES}" ${OpenCV_LIBS} stdc++fs)
set_property(TARGET dcgan2 PROPERTY CXX_STANDARD 11)

Edit main.cpp:

#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include <vector>

using namespace std;
using namespace cv;
int main() {
    torch::jit::script::Module module;
    module = torch::jit::load("/home/xbw/siamese/model/pytorch/traced_resnet_model.pt");
    module.to(at::kCUDA);
//    torch::Tensor tensor = torch::eye(3);
    // Create a vector of inputs.
    std::vector<torch::jit::IValue> inputs;
    Mat image = imread("/home/xbw/carla/PythonAPI/examples/train_3/img/t00000224.png");

    if(image.empty())
    {
        cout<<"can't find image!!!"<<endl;
        return 0;
    }
    resize(image, image, Size(224, 224));
    cvtColor(image, image, CV_BGR2RGB);   // the model was trained on RGB, OpenCV loads BGR
    Mat img_float;
    image.convertTo(img_float, CV_32F, 1.0 / 255);   // scale pixels to [0, 1]
    cout << img_float.at<Vec3f>(100,100)[1] << endl; // sanity check on one pixel value
    imshow("res",img_float);
    waitKey();
    // Wrap the image buffer as a 1x224x224x3 tensor (NHWC), then reorder to NCHW.
    auto img_tensor = torch::from_blob(img_float.data, {1, 224, 224, 3}, torch::kFloat32);
    img_tensor = img_tensor.permute({0, 3, 1, 2});

    inputs.emplace_back(img_tensor.to(at::kCUDA));
    at::Tensor output = module.forward(inputs).toTensor();
    inputs.pop_back();
    cout << output.slice(1,0,2) << endl;
    return 0;
}

If it prints a sensible result, everything is working.
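One thing worth checking against your Python preprocessing: the code above only scales pixel values to [0, 1]. If your training pipeline also applied torchvision's Normalize, you would apply the same statistics to the tensor (after the permute and before moving it to the GPU). A minimal sketch, assuming the usual ImageNet mean/std, which you should replace with your own values:

// Assumption: the model was trained with ImageNet-style normalization.
img_tensor[0][0].sub_(0.485).div_(0.229);  // R channel
img_tensor[0][1].sub_(0.456).div_(0.224);  // G channel
img_tensor[0][2].sub_(0.406).div_(0.225);  // B channel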

3.2 Using qmake

Our lab uses qmake everywhere, so to stay consistent I also tried building with qmake. Create a new project and choose qmake.
Edit dcgan3.pro.
It mainly needs to pull in three libraries: OpenCV, CUDA and libtorch.
The following line must be added; without it the link fails, because the pre-built libtorch binaries use the old (pre-C++11) ABI:

QMAKE_CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=0

Reference: https://github.com/pytorch/pytorch/issues/13541

TEMPLATE = app
CONFIG += console c++11
CONFIG -= app_bundle
CONFIG -= qt

SOURCES += main.cpp

QMAKE_CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=0

QMAKE_LIBDIR += /usr/local/cuda/lib64
INCLUDEPATH += /usr/local/cuda/include \
    /usr/local/include/opencv \
    /usr/local/include/

LIBS += \
        -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand -pthread\
        -L/usr/local/opencv-3.4.5/lib/ -lopencv_calib3d -lopencv_features2d -lopencv_flann \
         -lopencv_core -lopencv_imgproc -lopencv_ml  -lopencv_objdetect  -lopencv_photo -lopencv_stitching \
         -lopencv_videoio -lopencv_imgcodecs -lopencv_highgui \
        -lopencv_superres -lopencv_video -lopencv_videostab

INCLUDEPATH += /usr/include \
               /home/xbw/libtorch/include \
               /home/xbw/libtorch/include/torch/csrc/api/include

LIBS += -L/usr/lib \
        -L/home/xbw/libtorch/lib \
        -L/usr/local/cuda/lib64

LIBS += -lstdc++fs

LIBS += -lopencv_imgproc -lopencv_core -lopencv_highgui -lopencv_imgcodecs

LIBS += -lc10 -lc10_cuda -ltorch -lgomp \
               -lcudart -lnvToolsExt

main.cpp is identical to the one in the CMake example above.

4. Wrapping the model in a class

All it takes is one header file and one source file.
Header file torchmodel.h:

#ifndef TORCHMODEL_H
#define TORCHMODEL_H
#include <torch/torch.h>
#include <torch/script.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgcodecs.hpp"

class TorchModel
{
public:
    // s: path to the traced .pt model; img_size: the input size the network expects
    TorchModel(const std::string &s,const cv::Size &img_size):model_path(s),img_size(img_size){}
    void init();                          // load the model and move it to the GPU
    cv::Point2f getPoint(cv::Mat img);    // run inference on one RGB image, return the predicted (x, y)
private:
    std::string model_path;
    cv::Size img_size;
    torch::jit::script::Module module;
};
#endif // TORCHMODEL_H

Source file torchmodel.cpp:

#include "torch_model/torchmodel.h"

void TorchModel::init()
{
    module = torch::jit::load(model_path);
    module.to(at::kCUDA);
}

cv::Point2f TorchModel::getPoint(cv::Mat img)
{
    // The caller is expected to pass an RGB image (see the cvtColor call in main below).
    cv::resize(img, img, img_size);
    cv::Mat img_float;
    img.convertTo(img_float, CV_32F, 1.0 / 255);
    // Build the input tensor from the resized image (NHWC) and reorder it to NCHW.
    auto img_tensor = torch::from_blob(img_float.data, {1, img_size.height, img_size.width, 3}, torch::kFloat32);
    img_tensor = img_tensor.permute({0, 3, 1, 2});
    std::vector<torch::jit::IValue> inputs;
    inputs.emplace_back(img_tensor.to(at::kCUDA));
    at::Tensor output = module.forward(inputs).toTensor();
    float res_x = output[0][0].item().toFloat();
    float res_y = output[0][1].item().toFloat();
    return cv::Point2f(res_x, res_y);
}
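If your own model returns more than one tensor, forward() yields a tuple IValue instead of a single tensor, and it has to be unpacked before indexing. A minimal sketch (the two-output model here is hypothetical):

// Hypothetical model with two output tensors: unpack the returned tuple first.
auto result = module.forward(inputs).toTuple();
at::Tensor first  = result->elements()[0].toTensor();
at::Tensor second = result->elements()[1].toTensor();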

Remember to change the header include path to match your own project layout. Then the class can be called from main():

#include <vector>
#include <string>
#include "torch_model/torchmodel.h"
using namespace std;
using namespace cv;

int main()
{
    TorchModel model_test("/home/xbw/siamese/model/pytorch/traced_resnet_model.pt",Size(224,224));
    model_test.init();
    Mat image = imread("/home/xbw/carla/PythonAPI/examples/train_3/img/t00000224.png");
    if(image.empty())
    {
        cout<<"can't find image!!!"<<endl;
        return 0;
    }
    cvtColor(image, image, CV_BGR2RGB);  // the model expects RGB input
    Point2f res = model_test.getPoint(image);
    cout<<res.x<<' '<<res.y<<endl;
    return 0;
}
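In a real project it is also worth guarding these calls, since torch::jit::load (and forward) throw a c10::Error when something goes wrong, for example a wrong model path. A minimal sketch:

// Catch load/inference failures instead of letting them terminate the program.
try {
    model_test.init();
    Point2f res = model_test.getPoint(image);
    cout << res.x << ' ' << res.y << endl;
} catch (const c10::Error &e) {
    cerr << "libtorch error: " << e.what() << endl;
    return -1;
}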
