- Go to the PyTorch website https://pytorch.org/get-started/locally/
and download the prebuilt LibTorch package.
I downloaded version 1.10.2 (the CUDA 11.3 build); the download link is:
https://download.pytorch.org/libtorch/cu113/libtorch-shared-with-deps-1.10.2%2Bcu113.zip
After the download finishes, unzip it to a directory of your choice.
- Open CLion on Linux and create a new CMake project.
Select C++17 as the C++ standard; otherwise the program will fail with errors when it runs.
- Configure the CMakeLists.txt file:
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(libtorch_test)

# Tell CMake where the unpacked LibTorch lives so find_package can locate TorchConfig.cmake
list(APPEND CMAKE_PREFIX_PATH "/opt/src_code/libtorch")
find_package(Torch REQUIRED)

add_executable(libtorch_test main.cpp)
target_link_libraries(libtorch_test "${TORCH_LIBRARIES}")
# LibTorch requires at least C++14; this project uses C++17
set_property(TARGET libtorch_test PROPERTY CXX_STANDARD 17)
- Edit the main.cpp file:
#include <torch/script.h>
#include <torch/torch.h>
#include <ctime>     // std::clock_t, std::clock, CLOCKS_PER_SEC
#include <iostream>

int main() {
    std::cout << "CUDA available: " << torch::cuda::is_available() << std::endl;
    std::cout << "cuDNN available: " << torch::cuda::cudnn_is_available() << std::endl;

    std::clock_t s, e;
    s = std::clock();
    torch::Tensor cuda_output;
    // Single iteration; raise the loop count to repeat the allocation.
    for (int i = 0; i < 1; i++) {
        cuda_output = torch::randn({5, 4}, torch::device(torch::kCUDA));
    }
    std::cout << cuda_output << std::endl;
    e = std::clock();
    std::cout << "use time: " << (e - s) * 1000000 / CLOCKS_PER_SEC << " microseconds" << std::endl;
    return 0;
}
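Note that std::clock() measures CPU time, and CUDA kernels are launched asynchronously, so the interval above may not reflect how long the GPU actually spent. A minimal wall-clock sketch, assuming torch::cuda::synchronize() is available in this LibTorch build (it is present in recent versions), could look like this:

#include <torch/torch.h>
#include <chrono>
#include <iostream>

int main() {
    auto start = std::chrono::steady_clock::now();
    // Allocate a random tensor directly on the GPU.
    torch::Tensor t = torch::randn({5, 4}, torch::device(torch::kCUDA));
    torch::cuda::synchronize();  // wait for queued CUDA work before stopping the timer
    auto end = std::chrono::steady_clock::now();
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    std::cout << "wall-clock time: " << us << " microseconds" << std::endl;
    return 0;
}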
- Test run
If the output ends with [ CUDAFloatType{5,4} ], the tensor was allocated in GPU memory. A programmatic check is sketched below.
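If you want to confirm the placement from code instead of reading the printed type tag, a small sketch along these lines checks the tensor's device and copies it back to host memory (standard LibTorch tensor calls, shown here only as an illustration):

#include <torch/torch.h>
#include <iostream>

int main() {
    torch::Tensor t = torch::randn({5, 4}, torch::device(torch::kCUDA));

    // Report which device owns the tensor (expected: cuda:0).
    std::cout << "device: " << t.device() << std::endl;
    std::cout << "is_cuda: " << t.is_cuda() << std::endl;

    // Copy back to host memory; printing it now shows CPUFloatType{5,4}.
    torch::Tensor cpu_copy = t.to(torch::kCPU);
    std::cout << cpu_copy << std::endl;
    return 0;
}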