Problem notes: torch::cuda::is_available() returns false when calling libtorch 1.8 (GPU) from C++ with CUDA 11.1, an RTX 3090 and Ubuntu 18.04

This post walks through configuring libtorch to use the GPU under three different build systems: CMake, a plain makefile, and a Qt (qmake) project. The key in every case is adding the linker flag `-Wl,--no-as-needed -ltorch_cuda`, so that the required shared library is kept at link time. Each example gives the concrete project files and command-line steps needed to call the GPU successfully.

1. Setting up the project with CMake:
This is the CMake project commonly shown online, and it can use the GPU without any manual tricks, because find_package(Torch) already applies the -Wl,--no-as-needed handling for torch_cuda through its imported targets (the reference CMakeLists.txt at the end of this post shows how that imported target is defined).
Inside a folder, create three items: CMakeLists.txt, example-app.cpp and a build directory.
CMakeLists.txt:

cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(example-app)
set(Torch_DIR /opt/libs-x64/libtorch_gpu1.8/share/cmake/Torch)

find_package(Torch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
message(WARNING ${TRY_COMPILE_OUTPUT})
message("hello")
message("${TORCH_LIBRARIES}")

add_executable(example-app example-app.cpp)
target_link_libraries(example-app "${TORCH_LIBRARIES}")
set_property(TARGET example-app PROPERTY CXX_STANDARD 14)

 example-app.cpp:
 

include "torch/script.h"
include "torch/torch.h"
using namespace torch;
using namespace std;
int main(int argc, char *argv[])
{    
    cc = torch::cuda::is_available();
    cout << "is_available:" << torch::cuda::is_available() << "\n";
    cout << "is_available:" << torch::cuda::cudnn_is_available()<< "\n";
    cout << "is_available:" << torch::cuda::device_count()<< "\n";
    if (torch::cuda::is_available()) {
        cout << "cuda!" << endl;
        torch::DeviceType device_type = at::kCUDA;
    }
    else
    {
        cout << "cpu" << endl;
    }
    return 1;
}

Open a terminal in the build directory and run:

cmake ..
make -j8 
./example-app
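
Beyond checking is_available(), a quick way to confirm that the CUDA backend really works is to allocate a tensor on the GPU and compute with it. The following snippet is only a minimal sketch of such a check (it is not part of the original example; it just assumes the same libtorch setup as example-app.cpp):

#include "torch/torch.h"
#include <iostream>

int main()
{
    if (!torch::cuda::is_available()) {
        std::cout << "CUDA not available, staying on the CPU\n";
        return 1;
    }
    // Allocate directly on the GPU and do a small computation there.
    torch::Tensor a = torch::ones({2, 3}, torch::device(torch::kCUDA));
    torch::Tensor b = torch::rand({2, 3}, torch::device(torch::kCUDA));
    torch::Tensor c = (a + b).sum();
    // Bring the scalar result back to the host to print it.
    std::cout << "sum computed on the GPU: " << c.item<float>() << std::endl;
    return 0;
}

It builds with the same CMakeLists.txt; if the GPU libraries were dropped at link time it reports CUDA as unavailable instead.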

2. Setting up the project with a makefile:
Inside a folder, create two items: a makefile and an src directory containing main.cpp (the same code as above).
makefile:

TARGET=demo
CXX=g++

CXXFLAGS= -O3 -Wall  -fPIC\
       -std=c++14 \
       -I/usr/local/include \
       -I/opt/libs-x64/libtorch_gpu1.8/include \
       -I/opt/libs-x64/libtorch_gpu1.8/include/torch/csrc/api/include
       
CXXLIBS= \
      -L/usr/lib \
      -L/usr/local/lib \
      -L/opt/libs-x64/libtorch_gpu1.8/lib \
      -lc10 -lc10_cuda \
      -ltorch_cpu -ltorch_cuda


SRCS=$(wildcard ./src/*.cpp)
OBJS=$(SRCS:.cpp=.o)


all: $(TARGET)

$(TARGET): $(OBJS)
	$(CXX) -o $@ $^ $(CXXFLAGS) $(CXXLIBS)


clean:
	rm -rf src/*.o $(TARGET)

.PHONY: clean

Test: the GPU is not detected at first.

Open a terminal in the makefile folder, build and run the demo (make -j8, then ./demo): the GPU is reported as unavailable. To fix this, add one line to the makefile:

CXXFLAGS += -Wl,--no-as-needed -ltorch_cuda

(The flag reaches the linker because this makefile also passes CXXFLAGS on the link command.) Then rebuild and rerun:

make clean
make -j8
./demo

and the GPU is now detected normally.

Reference: 《jetson系列编译部署libtorch》 (安阳知秋, CSDN blog)

3. Setting up the project with Qt:
This test project only uses Qt as a build and debugging environment; no Qt GUI modules are pulled in, so core and gui are removed from the configuration.
The project contains a single main.cpp (the same code as above).
The .pro file is shown below; the crucial part is adding the line LIBS += -Wl,--no-as-needed -ltorch_cuda


QT       -= core gui
#greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
CONFIG += c++14 console
TARGET = testgpu
TEMPLATE = app

QMAKE_CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=0

INCLUDEPATH += /usr/local/cuda-11.1/include
DEPENDPATH += /usr/local/cuda-11.1/lib64
LIBS += -L/usr/local/cuda-11.1/lib64 \
    -lcuda \
    -lcudart \
    -lnvrtc \
    -lnvToolsExt

INCLUDEPATH += /opt/libs-x64/libtorch_gpu1.8/include \
               /opt/libs-x64/libtorch_gpu1.8/include/torch/csrc/api/include
DEPENDPATH += /opt/libs-x64/libtorch_gpu1.8/lib


LIBS += -L/opt/libs-x64/libtorch_gpu1.8/lib \
    -ltorch \
    -ltorch_cpu \
    -lc10 \
    -lc10_cuda \
    -lcaffe2_nvrtc \
    -lcpuinfo \
    -lcaffe2_module_test_dynamic \
    -lcaffe2_observers \
    -lcaffe2_nvrtc \
    -lcaffe2_detectron_ops_gpu \
    -ltorch_cuda_cpp -ltorch_cuda_cu \
    -Wl,--no-as-needed -ltorch_cuda

SOURCES += \
        main.cpp

With this configuration the test program can use the GPU normally.

Why this works:

-Wl,--as-needed tells the linker to drop a shared library from the dependencies if none of its symbols are referenced directly (Ubuntu's default toolchain passes --as-needed, and build systems often add it as well).

-Wl,--no-as-needed tells the linker to keep a shared library even when no symbol from it is referenced directly.

The application code only calls symbols that live in libtorch_cpu and c10, so with --as-needed the linker silently drops libtorch_cuda. The CUDA backend, however, is registered by static initializers inside libtorch_cuda when that library is loaded; if it is never loaded, torch::cuda::is_available() returns false even though the driver, CUDA and the GPU are all fine. Forcing -Wl,--no-as-needed -ltorch_cuda keeps the dependency, so the library is loaded at program start and the CUDA hooks get registered.
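
Since the root cause is simply that libtorch_cuda.so never gets loaded, an alternative workaround that avoids touching the link flags is to force-load the library at runtime before the first CUDA query. The snippet below is only a sketch of that idea (the library name, the -ldl requirement and the LD_LIBRARY_PATH assumption are specific to this setup and not verified here); the linker-flag fix above remains the cleaner solution:

#include "torch/torch.h"
#include <dlfcn.h>
#include <iostream>

int main()
{
    // Force the CUDA backend library to load so that its static initializers
    // can register the CUDA hooks, even if the linker dropped the dependency.
    void *handle = dlopen("libtorch_cuda.so", RTLD_NOW | RTLD_GLOBAL);
    if (!handle) {
        std::cout << "dlopen failed: " << dlerror() << "\n";
    }
    std::cout << "is_available: " << torch::cuda::is_available() << "\n";
    return 0;
}

For this to work, /opt/libs-x64/libtorch_gpu1.8/lib must be on LD_LIBRARY_PATH (or in the rpath), and the program may need to be linked with -ldl.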


Reference material (a CMakeLists.txt from another setup, libtorch 1.7.1 + CUDA 10.1, that applies the same --no-as-needed trick through an imported target):

cmake_minimum_required(VERSION 3.10)
PROJECT (testTorch)

# compiler settings
set(CMAKE_C_COMPILER gcc)
set(CMAKE_CXX_COMPILER g++)
# use the C++14 standard
set(CMAKE_CXX_STANDARD 14)
# # set(DEFINES " -Dlib_protobuf -Dlib_webSocket -Dlib_zmq -Dlib_log4cpp -Dlib_eigen -Dlib_decision -Dlib-breakpad -Dlib-ibusCommon -Dlib_opencv -Dlib_uchardet  -Dlib-zmq -Dlib-thread -Dlib_iconv -Dlib_uchardet -Dlib_ffmpeg -Dlib_x264 -Dlib_xvid -Dlib_lame ")
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wno-multichar -Wno-strict-aliasing -Wno-unused-parameter -Wno-psabi -Wno-varargs  -Wall -W -fPIC ${DEFINES}")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -fopenmp -Wno-multichar -Wno-strict-aliasing -Wno-unused-parameter -Wno-psabi -Wno-varargs -std=gnu++1y -Wall -W -fPIC ${DEFINES}")
message("source dir ${CMAKE_CURRENT_LIST_DIR}")

# specify the C++ standard

#set(CMAKE_CXX_COMPILE_FEATURES "${CMAKE_CXX_COMPILE_FEATURES};-fopenmp")
#set(CMAKE_CXX_FLAGS  "-fopenmp ${CMAKE_CXX_FLAGS} -Wno-multichar -Wno-strict-aliasing -Wno-unused-parameter -Wno-psabi -Wno-varargs  -std=gnu++11 -Wall -W -fPIC")



set(INC_DIR "/opt/libs-x64/libtorch_gpu1.71/include") #这里设置INC_DIR 的地址为/usr/local/include
include_directories(${INC_DIR}) #这个地方放头文件的地方,从INC_DIR 中找。
set(INC_DIR "/opt/libs-x64/libtorch_gpu1.71/include/torch/csrc/api/include") #这里设置INC_DIR 的地址为/usr/local/include
include_directories(${INC_DIR}) #这个地方放头文件的地方,从INC_DIR 中找。
set(INC_DIR "/usr/local/cuda-10.1/include") #这里设置INC_DIR 的地址为/usr/local/include
include_directories(${INC_DIR}) #这个地方放头文件的地方,从INC_DIR 中找。

link_directories("/opt/libs-x64/libtorch_gpu1.71/lib")
link_directories("/usr/local/cuda-10.1/lib64")


# SET(SRC_LIST ./src/main.cpp)
# ADD_EXECUTABLE(monitor ${SRC_LIST})

  find_package(Caffe2 REQUIRED PATHS "/opt/libs-x64/libtorch_gpu1.71/share/cmake/Caffe2")
  # Create imported target torch_cuda_library
  add_library(torch_cuda_library1 INTERFACE IMPORTED)
  set_target_properties(torch_cuda_library1 PROPERTIES
   INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$<TARGET_FILE:torch_cuda>\" -Wl,--as-needed;\$<TARGET_PROPERTY:torch_cuda,INTERFACE_LINK_LIBRARIES>"
  # INTERFACE_LINK_LIBRARIES " -Wl,--as-needed;\$<TARGET_PROPERTY:torch_cuda,INTERFACE_LINK_LIBRARIES>"
   #INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$<TARGET_FILE:torch_cuda>\"  -Wl,--as-needed; "
 )
 message("hello")
message("${TARGET_FILE}")
message($<TARGET_PROPERTY:torch_cuda>)


# # collect the source files in ./src into the variable DIR_SRCS
# aux_source_directory(DIR_SRC ./src)
# aux_source_directory(./src DIR_SRCS)
# # and build the executable from them
# add_executable(monitor ${DIR_SRCS})
add_executable(testTorch ./main.cpp)
# target_link_libraries(monitor ${ZeroMQ_LIBRARY})  # link it
target_link_libraries(testTorch torch_cuda_library1)
target_link_libraries(testTorch -lc10)
#target_link_libraries(testTorch -ltorch_cpu)
target_link_libraries(testTorch -lc10_cuda)
target_link_libraries(testTorch -ltorch_cuda)
target_link_libraries(testTorch -ltorch)
target_link_libraries(testTorch -lcudart)
target_link_libraries(testTorch -lnvrtc)
target_link_libraries(testTorch -lnvToolsExt)
target_link_libraries(testTorch -lcuda)


 

Makefile reference: 《jetson系列编译部署libtorch》 (安阳知秋, CSDN blog)

CMake reference: 《[cmake][转载]add_library详解》 (FL1623863129, CSDN blog)
