Download libtorch
This walkthrough uses the prebuilt 1.7.1 release for CUDA 10.1, libtorch-shared-with-deps-1.7.1+cu101.zip, which can be downloaded from pytorch.org. Unzip it somewhere convenient; the extracted path is needed later as CMAKE_PREFIX_PATH.
Download the example
The code below comes from the official PyTorch C++ autograd example:
https://github.com/pytorch/examples/tree/master/cpp/autograd
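To get a local copy, either grab the two files from that page or clone the whole examples repository (a sketch; the autograd sample lives under cpp/autograd):

git clone https://github.com/pytorch/examples.git
cd examples/cpp/autograd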
CMakeLists.txt
# CMAKE_CXX_STANDARD (used below) requires CMake 3.1 or newer
cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
project(autograd)
set(CMAKE_CXX_STANDARD 14)
find_package(Torch REQUIRED)
add_executable(${PROJECT_NAME} "autograd.cpp")
target_link_libraries(${PROJECT_NAME} "${TORCH_LIBRARIES}")
# The following code block is suggested to be used on Windows.
# According to https://github.com/pytorch/pytorch/issues/25457,
# the DLLs need to be copied to avoid memory errors.
if (MSVC)
  file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
  add_custom_command(TARGET ${PROJECT_NAME}
                     POST_BUILD
                     COMMAND ${CMAKE_COMMAND} -E copy_if_different
                     ${TORCH_DLLS}
                     $<TARGET_FILE_DIR:${PROJECT_NAME}>)
endif (MSVC)
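With the CMakeLists.txt above in place, a typical out-of-source build looks like this (a sketch: /path/to/libtorch is a placeholder for wherever the libtorch zip was extracted):

mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
cmake --build . --config Release

Passing CMAKE_PREFIX_PATH is what lets find_package(Torch REQUIRED) locate TorchConfig.cmake inside the unzipped libtorch directory.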
autograd.cpp
#include <torch/torch.h>
#include <iostream>
using namespace torch::autograd;
void basic_autograd_operations_example() {
  std::cout << "====== Running: \"Basic autograd operations\" ======" << std::endl;

  // Create a tensor and set ``torch::requires_grad()`` to track computation with it
  auto x = torch::ones({2, 2}, torch::requires_grad());
  std::cout << x << std::endl;

  // Do a tensor operation:
  auto y = x + 2;
  std::cout << y << std::endl;

  // ``y`` was created as a result of an operation, so it has a ``grad_fn``.
  std::cout << y.grad_fn()->name() << std::endl;

  // Do more operations on ``y``
  auto z = y * y * 3;
  auto out = z.mean();

  std::cout << z << std::endl;
  std::cout << z.grad_fn()->name() << std::endl;