CMakeLists.txt (x86_64, plain OpenCV demo):
# Minimal x86_64 build of the OpenCV demo.
cmake_minimum_required(VERSION 3.0)                    # VERSION in project() needs >= 3.0
project(Test1 VERSION 1.0)                             # project name and version (was fused into one token)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")        # enable common compiler warnings
set(CMAKE_BUILD_TYPE Debug)
# If CMake cannot find OpenCV on its own, point OpenCV_DIR at its build/install tree.
set(OpenCV_DIR "/home/opencv-3.4.16/build")
find_package(OpenCV REQUIRED)                          # locate the required OpenCV package
include_directories(${OpenCV_INCLUDE_DIRS})            # OpenCV header directories
# link_directories() only affects targets created AFTER it, so it must precede add_executable().
link_directories(${OpenCV_LIBRARY_DIRS})               # OpenCV library directories (optional)
add_executable(${PROJECT_NAME} opencv_demo.cpp)        # executable name and its source file
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})  # link the OpenCV libraries
Test code (opencv_demo.cpp):
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Smoke test: load an image with OpenCV and write a copy back to disk.
// Returns 0 on success, 1 if the input image cannot be loaded.
int main()
{
    cout << "opencv test" << endl;

    Mat src_image_ = imread("lena.png");  // read lena.png from the current directory
    if (src_image_.empty())
    {
        // imread() returns an empty Mat on failure; imwrite() on an empty
        // Mat would throw, so fail early with a clear message instead.
        cerr << "failed to load lena.png" << endl;
        return 1;
    }
    imwrite("out1213.png", src_image_);

    // imshow("src_image_", src_image_);  // show the image in a window
    // waitKey(5000);                     // keep the window open for 5 seconds
    return 0;
}
x86_64: CMakeLists.txt for the TensorRT build (saved as trt.txt):
# x86_64 build: OpenCV + CUDA + TensorRT.
cmake_minimum_required(VERSION 3.0)                      # VERSION in project() needs >= 3.0
project(Test1 VERSION 1.0)                               # project name and version (was fused into one token)
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
set(CMAKE_BUILD_TYPE Debug)
# set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/workspace)
# If CMake cannot find OpenCV on its own, point OpenCV_DIR at its build tree.
set(OpenCV_DIR "/home/opencv-3.4.16/build")
set(CUDA_DIR "/usr/local/cuda")
set(TENSORRT_DIR "/home/TensorRT-7.0.0.11")
find_package(OpenCV REQUIRED)                            # locate the required OpenCV package
find_package(CUDA REQUIRED)                              # also populates CUDA_LIBRARIES
include_directories(
    ${PROJECT_SOURCE_DIR}/include
    ${OpenCV_INCLUDE_DIRS}                               # OpenCV headers
    ${CUDA_DIR}/include
    ${TENSORRT_DIR}/include
    ${TENSORRT_DIR}/samples/common
)
link_directories(
    ${OpenCV_LIBRARY_DIRS}                               # OpenCV libraries (optional)
    ${CUDA_DIR}/lib64
    ${TENSORRT_DIR}/lib
)
file(GLOB_RECURSE cpp_srcs
    ${PROJECT_SOURCE_DIR}/src/*.cpp
    ${TENSORRT_DIR}/samples/common/logger.cpp)
add_executable(${PROJECT_NAME} ${cpp_srcs})              # executable name and its sources
file(GLOB_RECURSE TENSORRT_LIBS "${TENSORRT_DIR}/lib/*.so")
message(STATUS "CUDA_LIBRARIES ------------------------> ${CUDA_LIBRARIES}")
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})    # OpenCV libraries
target_link_libraries(${PROJECT_NAME} ${TENSORRT_LIBS})  # TensorRT shared libraries
# NOTE(review): the original linked ${CUDA_LIBS}, but its glob was commented out,
# so the variable was empty and the CUDA runtime was never linked. Use the
# CUDA_LIBRARIES list that find_package(CUDA) provides instead.
target_link_libraries(${PROJECT_NAME} ${CUDA_LIBRARIES})
aarch64 (Jetson): CMakeLists.txt for the TensorRT build (saved as trt.txt):
# CMake file for CUDA_YOLOV5 (aarch64 / Jetson).
# Minimum CMake version required.
cmake_minimum_required(VERSION 3.0)
# Project name.
project(MS_Unet)
# If CMake cannot find OpenCV on its own, point OpenCV_DIR at its build tree.
set(OpenCV_DIR "~/ws/opencv-3.4.16/opencv-3.4.16/build")
set(CUDA_DIR "/usr/local/cuda")
set(TENSORRT_DIR "/usr/src/tensorrt")
# Locate the OpenCV headers and libraries.
find_package(OpenCV REQUIRED)
message(STATUS "===============OpenCV_FOUND====================: ${OpenCV_FOUND}")
# Locate the CUDA toolkit headers and libraries.
find_package(CUDA REQUIRED)
message(STATUS "===============CUDA_FOUND======================: ${CUDA_FOUND}")
# CMAKE_CURRENT_SOURCE_DIR is the directory containing the CMakeLists.txt being processed.
message(STATUS "current path: ${CMAKE_CURRENT_SOURCE_DIR}")
# Enable C++11 (GNU dialect) for the host compilers.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu++0x")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++0x")
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_BUILD_TYPE Debug)
# Make the CUDA NVCC compiler use C++11 as well.
list(APPEND CUDA_NVCC_FLAGS -std=c++11;-O2)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler;-fPIC)
list(APPEND CUDA_NVCC_FLAGS -gencode arch=compute_72,code=sm_72)  # SM 7.2 (Jetson Xavier)
include_directories(
    ${PROJECT_SOURCE_DIR}/include
    ${OpenCV_INCLUDE_DIRS}          # OpenCV headers
    ${CUDA_DIR}/include
    ${TENSORRT_DIR}/include
    ${TENSORRT_DIR}/samples/common
)
# TensorRT / CUDA system locations.
include_directories(/usr/local/cuda/include)
link_directories(/usr/lib/aarch64-linux-gnu)  # was misspelled "arrch64", so the path was never found
link_directories(
    ${OpenCV_LIBRARY_DIRS}          # OpenCV libraries (optional)
    ${CUDA_DIR}/lib64
    # ${TENSORRT_DIR}/lib
)
message(STATUS "CUDA_DIR path: ${CUDA_DIR}")
message(STATUS "TENSORRT_DIR path: ${TENSORRT_DIR}")
message(STATUS "**************************************")
# Recursively collect all matching sources: *.cpp and *.cu.
# (The original also built an unused duplicate list `cpp_srcs`; removed.)
file(GLOB_RECURSE CPP_LIST
    ${PROJECT_SOURCE_DIR}/src/*.cpp
    ${TENSORRT_DIR}/samples/common/logger.cpp)
file(GLOB_RECURSE CU_LIST ${PROJECT_SOURCE_DIR}/src/*.cu)
message(STATUS "cpp list: ${CPP_LIST}")
message(STATUS "cu list: ${CU_LIST}")
# add_executable() cannot compile *.cu files; CUDA_ADD_EXECUTABLE (FindCUDA) handles both.
cuda_add_executable(${PROJECT_NAME} ${CPP_LIST} ${CU_LIST})
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})        # OpenCV libraries
target_link_libraries(${PROJECT_NAME} nvinfer nvonnxparser)  # TensorRT
target_link_libraries(${PROJECT_NAME} cudart)                # CUDA runtime
# NOTE(review): the original printed ${CUDA_LIBS} / ${TENSORRT_LIBS}, but both globs
# were commented out, so the variables were always empty; those messages are removed.
message(STATUS "**************************************")