一、CMake Build
1、GPU Deployment
Modify the yellow-highlighted paths accordingly and check the WITH_GPU option.
2、CPU Deployment
Modify the yellow-highlighted paths accordingly and uncheck the WITH_GPU option.
**Note:** the paddle_inference library must match the deployment target: use the GPU build of paddle_inference for GPU deployment and the CPU build for CPU deployment.
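For reference, the same configuration can also be done from the command line instead of the CMake GUI. This is only a sketch: the generator name and all paths are placeholders, and the variable names other than WITH_GPU (PADDLE_DIR, OPENCV_DIR, CUDA_LIB, CUDNN_LIB) are assumptions based on the usual PaddleDetection deploy/cpp CMakeLists.txt, so verify them against your copy before running.
cmake .. -G "Visual Studio 16 2019" -A x64 ^
    -DWITH_GPU=ON ^
    -DPADDLE_DIR=D:/path/to/paddle_inference ^
    -DOPENCV_DIR=D:/path/to/opencv/build ^
    -DCUDA_LIB=C:/path/to/cuda/lib/x64 ^
    -DCUDNN_LIB=C:/path/to/cudnn/lib/x64
For a CPU build, pass -DWITH_GPU=OFF and point PADDLE_DIR at the CPU build of paddle_inference.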
二、Project
Click Open Project in CMake to open the generated solution, switch the configuration to Release/x64, set main as the startup project, and then click Build.
You should then see main.exe generated under the Release folder, and it can be tested from the command line.
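For example, a quick test might look like the following; the paths are placeholders to replace with your own, and --model_dir / --image_file are the flags shown in main's usage message.
main.exe --model_dir=D:\path\to\inference_model\solov2_r50_enhance_coco --image_file=D:\path\to\test\208.bmp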
Next, modify the code so that it can be run directly from Visual Studio without command-line arguments.
1、Add the following lines before the PrintBenchmarkLog function.
std::string model_dir;
std::string image_file;
std::string video_file;
std::string image_dir;
int batch_size = 1;
bool use_gpu = true;           // change to false for CPU deployment
std::string device = "GPU";    // change to "CPU" for CPU deployment
int camera_id = -1;
double threshold = 0.1;
std::string output_dir = "output";
std::string run_mode = "paddle";
int gpu_id = 0;
bool run_benchmark = false;
bool use_mkldnn = false;
int cpu_threads = 1;           // number of CPU threads used for inference
bool use_dynamic_shape = false;
int trt_min_shape = 1;
int trt_max_shape = 1280;
int trt_opt_shape = 640;
bool trt_calib_mode = false;
2、Modify the main function accordingly.
int main(int argc, char** argv) {
  // Hard-code the model, test image, and output paths (adjust them to your setup)
  model_dir = "D:/Paddle/.../inference_model/solov2_r50_enhance_coco";
  image_file = "D:/Paddle/..../test/208.bmp";
  output_dir = "D:/Paddle/.../output";
  // Command-line parsing is no longer needed; the hard-coded values above are used
  // google::ParseCommandLineFlags(&argc, &argv, true);
  if (model_dir.empty() ||
      (image_file.empty() && image_dir.empty() && video_file.empty())) {
    std::cout << "Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ "
              << "--image_file=/PATH/TO/INPUT/IMAGE/" << std::endl;
    return -1;
  }
  if (!(run_mode == "paddle" || run_mode == "trt_fp32" ||
        run_mode == "trt_fp16" || run_mode == "trt_int8")) {
    std::cout
        << "run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or 'trt_int8'.";
    return -1;
  }
  std::transform(device.begin(), device.end(), device.begin(), ::toupper);
  if (!(device == "CPU" || device == "GPU" || device == "XPU")) {
    std::cout << "device should be 'CPU', 'GPU' or 'XPU'.";
    return -1;
  }
  // The old use_gpu deprecation check is disabled, since use_gpu is set directly above
  // if (use_gpu) {
  //   std::cout << "Deprecated, please use `--device` to set the device you want "
  //                "to run.";
  //   return -1;
  // }
  // Load model and create an object detector
  PaddleDetection::ObjectDetector det(model_dir,
                                      device,
                                      use_mkldnn,
                                      cpu_threads,
                                      run_mode,
                                      batch_size,
                                      gpu_id,
                                      trt_min_shape,
                                      trt_max_shape,
                                      trt_opt_shape,
                                      trt_calib_mode);
  // Do inference on the input video or image
  if (!PathExists(output_dir)) {
    MkDirs(output_dir);
  }
  if (!video_file.empty() || camera_id != -1) {
    PredictVideo(video_file, &det, output_dir);
  } else if (!image_file.empty() || !image_dir.empty()) {
    std::vector<std::string> all_img_paths;
    std::vector<cv::String> cv_all_img_paths;
    if (!image_file.empty()) {
      all_img_paths.push_back(image_file);
      if (batch_size > 1) {
        std::cout << "batch_size should be 1, when set `image_file`."
                  << std::endl;
        return -1;
      }
    } else {
      cv::glob(image_dir, cv_all_img_paths);
      for (const auto& img_path : cv_all_img_paths) {
        all_img_paths.push_back(img_path);
      }
    }
    PredictImage(all_img_paths, batch_size, threshold, run_benchmark, &det,
                 output_dir);
  }
  return 0;
}
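If you want to test a whole folder of images or a video instead of a single image, the same hard-coding approach should work: set image_dir or video_file in main instead of image_file, and the existing branches will route the run to cv::glob/PredictImage or PredictVideo. A sketch with hypothetical paths:
// In main(), instead of assigning image_file (paths are placeholders):
image_dir = "D:/Paddle/.../test";        // batch inference over every image in the folder
// or:
video_file = "D:/Paddle/.../demo.mp4";   // video inference via PredictVideo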