安装libtorch:
主要目的是想用C++在服务器上部署深度学习模型,libtorch安装较简单,按步骤即可
主要注意版本问题,最开始我安装了cu118的最新版本,各种奇怪的报错,后来果断卸载安装cu117,2.0.1对应pytorch版本,就直接安装成功了
参考链接: C++部署pytorch模型
参考链接: Ubuntu 20.04下c++ libtorch gpu配置与运行
部分命令如下:
# Out-of-source build: configure and compile against libtorch.
mkdir build
cd build
# CMAKE_PREFIX_PATH points cmake at the unpacked libtorch distribution;
# CMAKE_CUDA_ARCHITECTURES="80" targets sm_80 (A100-class) GPUs — adjust for your card.
cmake -DCMAKE_PREFIX_PATH="/data/l50037097/libtorch" -DCMAKE_CUDA_ARCHITECTURES="80" ..
make
安装OpenCV:
注意这里不能安装OpenCV 4.X版本 容易和libtorch冲突
安装Opencv 3.4版本即可
参考链接: Ubuntu20安装OpenCV3(图解亲测)
OpenCV和ffmpeg版本也冲突:
解决办法: 直接禁用ffmpeg 后续也不涉及视频处理
链接: opencv+ffmpeg环境(ubuntu)搭建全面详解
链接: link
最后解决
记录最终的CMakeLists.txt文件
# Minimal project linking a CUDA-enabled libtorch together with OpenCV.
cmake_minimum_required(VERSION 3.0)
# Pre-C++11 ABI flag — must match how the downloaded libtorch binary was built,
# otherwise linking fails with undefined std::string-related symbols.
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
project(cmake_main)
link_directories(/usr/local/cuda-11.7/lib64) # replace with your CUDA install path
set(CMAKE_CUDA_COMPILER /usr/local/cuda-11.7/bin/nvcc)
# Torch_DIR must be set BEFORE find_package(Torch) so CMake finds TorchConfig.cmake.
set(Torch_DIR /data/l50037097/libtorch/share/cmake/Torch)
find_package(OpenCV REQUIRED)
find_package(Torch REQUIRED)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
# include_directories(${OpenCV_INCLUDE_DIRS})
## set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
enable_language(CUDA) # enable CUDA language support
add_executable(cmake_main main_cpp.cpp)
# TORCH_LIBRARIES is populated by find_package(Torch); OpenCV_LIBS by find_package(OpenCV).
target_link_libraries(cmake_main "${TORCH_LIBRARIES}" ${OpenCV_LIBS})
set_property(TARGET cmake_main PROPERTY CXX_STANDARD 17)
结果记录:
torch_test.cpp
#include <torch/script.h>
#include <iostream>
#include <memory>
int main(int argc, const char* argv[]){
if(argc != 2){
std::cerr << "usage: main <path-to-exported-script-module>\n";
return -1;
}
torch::Device device(torch::kCUDA);
// Deserialize the ScriptModule from a file using torch::jit::load()
torch::jit::script::Module module = torch::jit::load(argv[1]);
module.to(device);
// Create a vector of inputs
std::vector<torch::jit::IValue> inputs;
inputs.push_back(torch::ones({1, 3, 224, 224}).to(device));
// Exectute the model
at::Tensor output = module.forward(inputs).toTensor();
std::cout << output.slice(/*dims=*/1, /*start=*/0, /*end=*/5) << '\n';
std::cout << "ok\n";
}
my_test.cpp
#include <torch/script.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <torch/torch.h>
// 有人说调用的顺序有关系,我这好像没啥用~~
int main()
{
torch::DeviceType device_type;
if (torch::cuda::is_available()) {
std::cout << "CUDA available! Predicting on GPU." << std::endl;
device_type = torch::kCUDA;
}
else {
std::cout << "Predicting on CPU." << std::endl;
device_type = torch::kCPU;
}
torch::Device device(device_type);
//Init model
std::string model_pb = "tests.pth";
auto module = torch::jit::load(model_pb);
module.to(at::kCUDA);
auto image = cv::imread("dog.jpg", cv::ImreadModes::IMREAD_COLOR);
cv::Mat image_transfomed;
cv::resize(image, image_transfomed, cv::Size(32, 32));
// convert to tensort
torch::Tensor tensor_image = torch::from_blob(image_transfomed.data,
{ image_transfomed.rows, image_transfomed.cols,3 }, torch::kByte);
tensor_image = tensor_image.permute({ 2,0,1 });
tensor_image = tensor_image.toType(torch::kFloat);
tensor_image = tensor_image.div(255);
tensor_image = tensor_image.unsqueeze(0);
tensor_image = tensor_image.to(at::kCUDA);
torch::Tensor output = module.forward({ tensor_image }).toTensor();
auto max_result = output.max(1, true);
auto max_index = std::get<1>(max_result).item<float>();
std::cout << output << std::endl;
//return max_index;
return 0;
}
编译成功!!!!!