CMake Usage


Getting started with CMake

project (HELLO)   # specify the project name; this is also the name of the generated VC project

>> Use ${HELLO_SOURCE_DIR} to refer to the project root (source) directory

include_directories: specify header file search paths, equivalent to gcc's -I option

>> include_directories (${HELLO_SOURCE_DIR}/Hello)  # add Hello as an include directory

link_directories: specify search paths for shared or static libraries, equivalent to gcc's -L option

>> link_directories (${HELLO_BINARY_DIR}/Hello)     # add Hello as a link directory

add_subdirectory: add a subdirectory to the build

>> add_subdirectory (Hello)

add_executable: build an executable from the listed source files (object .o files can also be listed)

>> add_executable (helloDemo demo.cxx demo_b.cxx)   # compile the .cxx files into the helloDemo executable

add_definitions: add compile flags / preprocessor definitions

>> add_definitions(-DDEBUG)  # adds the DEBUG macro definition to the gcc command line

>> add_definitions("-Wall -ansi -pedantic -g")

target_link_libraries: add libraries to link against, equivalent to gcc's -l option

>> target_link_libraries(demo Hello)  # link the Hello library into the final executable demo

add_library: build a library from source files

>> add_library(Hello hello.cxx)  # compile hello.cxx into a static library such as libHello.a

add_custom_target: add a target with no output file that runs user-specified commands (useful for auxiliary build steps)

message(STATUS|FATAL_ERROR "message"): print a message during configuration; FATAL_ERROR also stops processing

set_target_properties( ... ): set properties on a target, e.g. OUTPUT_NAME, VERSION, ...

link_libraries( lib1 lib2 ... ): link all targets defined afterwards against the same set of libraries
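
Putting the commands above together, a minimal two-level project could look like the sketch below (the Hello/ and Demo/ directory and file names are illustrative, following the examples above):

# Top-level CMakeLists.txt
cmake_minimum_required(VERSION 3.0)
project (HELLO)
add_subdirectory (Hello)                           # library
add_subdirectory (Demo)                            # executable

# Hello/CMakeLists.txt
add_library (Hello hello.cxx)                      # builds libHello.a

# Demo/CMakeLists.txt
include_directories (${HELLO_SOURCE_DIR}/Hello)    # headers of the Hello library
link_directories (${HELLO_BINARY_DIR}/Hello)       # location of libHello.a
add_executable (demo demo.cxx)
target_link_libraries (demo Hello)                 # link demo against Hello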

CMake + PyTorch (libtorch) demo

#include <torch/torch.h>
#include <iostream>
#include <ostream>
#include <sstream>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/function.h>
namespace torch_wrap{
    using namespace torch;
    using namespace torch::cuda;
};

using namespace torch_wrap;

int main2() {
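    // Scratch example: build cc = 2*a + 3*b, query d(cc)/da via torch::autograd::grad,
    // then call cc.backward() and print a.grad().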
    torch::manual_seed(1000);
    auto a = torch::randn({1}).requires_grad_(true);
    auto b = torch::randn({1}).requires_grad_(true);
    auto cc=2*a+3*b;
    std::ostringstream es1d;

    es1d << a << std::endl;
    std::string ccc = es1d.str();
    std::cout << ccc << std::endl;

//    auto grad_output = torch::ones_like(cc);
//    auto gradient = torch::autograd::grad({cc}, {a,b}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
    auto gradient = torch::autograd::grad({cc}, {a, b}, /*grad_outputs=*/{}, /*retain_graph=*/true)[0];  // retain the graph so cc.backward() below still works

    cc.backward();
    std::ostringstream esd;

    esd << a.grad() << std::endl;
    std::string cc1c = esd.str();
//
//    auto sdd=a.show_str();

    std::cout << cc1c << std::endl;





//    auto input = torch::randn({1,3,4,4}).requires_grad_(true);
//
//    bool ss=torch_wrap::cuda::is_available();
//    torch_wrap::nn::Conv2d model(torch::nn:: Conv2dOptions(3, 5, 3).stride(1).bias(false));
//    std::ostrstream esd;
//    esd << model.get()->weight.grad() << std::endl;
//    string cc=esd.str();
//    auto output = model(input);
//    output.mean();
//    //
//
//    auto grad_output = torch::ones_like(output);
//    auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
    auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/0) - 1), 2).mean();  // gradient is 1-D here, so take the norm over dim 0
//    std::ostrstream es;
//    es << model.get()->weight.grad() << std::endl;
//    string c=es.str();
//    int mmm=0;
//    std::vector<model.get()->weight.grad()>
// Calculate loss
//    auto target = torch::randn({3, 3});
//    auto loss = torch::nn::MSELoss()(output, target);

// Use norm of gradients as penalty
//    auto grad_output = torch::ones_like(output);
//    auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
//    auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/1) - 1), 2).mean();
//
// Add gradient penalty to loss
//    auto combined_loss = loss + gradient_penalty;
//    combined_loss.backward();
//
//    std::cout << input.grad() << std::endl;
    return 0;
}



#include <torch/torch.h>
#include <iostream>

using namespace torch::autograd;

void basic_autograd_operations_example() {
    std::cout << "====== Running: \"Basic autograd operations\" ======" << std::endl;

    // Create a tensor and set ``torch::requires_grad()`` to track computation with it
    auto x = torch::ones({2, 2}, torch::requires_grad());
    std::cout << x << std::endl;

    // Do a tensor operation:
    auto y = x + 2;
    std::cout << y << std::endl;
    auto a111 = torch::randn({1,}).requires_grad_();

    // ``y`` was created as a result of an operation, so it has a ``grad_fn``.
    std::cout << y.grad_fn()->name() << std::endl;

    // Do more operations on ``y``
    auto z = pow(y * y +1,2);
    auto out = z.mean();

    std::cout <<"zzzzzzzzzzzzz"<< z << std::endl;

    std::cout << z.grad_fn()->name() << std::endl;
//    std::cout << z.grad_fn().get()-> next_edges()<< std::endl;

    std::cout << out << std::endl;
    std::cout << out.grad_fn()->name() << std::endl;

    // ``.requires_grad_( ... )`` changes an existing tensor's ``requires_grad`` flag in-place.
    std::cout <<"aaaaaaaaaaaaaaaaaaaaaaaa" << std::endl;

    auto a = torch::randn({2, 2});
    a = ((a * 3) / (a - 1));
    std::cout << a.requires_grad() << std::endl;

    a.requires_grad_(true);
    std::cout << a.requires_grad() << std::endl;

    auto b = (a * a).sum();
    std::cout << b.grad_fn()->name() << std::endl;
//    torch::autograd::Function
    // Let's backprop now. Because ``out`` contains a single scalar, ``out.backward()``
    // is equivalent to ``out.backward(torch::tensor(1.))``.
    out.backward();

    // Print gradients d(out)/dx
    std::cout << x.grad() << std::endl;

    // Now let's take a look at an example of vector-Jacobian product:
    x = torch::randn(3, torch::requires_grad());

    y = x * 2;
    while (y.norm().item<double>() < 1000) {
        y = y * 2;
    }

    std::cout << y << std::endl;
    std::cout << y.grad_fn()->name() << std::endl;

    // If we want the vector-Jacobian product, pass the vector to ``backward`` as argument:
    auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::kFloat);
    y.backward(v);

    std::cout << x.grad() << std::endl;

    // You can also stop autograd from tracking history on tensors that require gradients
    // either by putting ``torch::NoGradGuard`` in a code block
    std::cout << x.requires_grad() << std::endl;
    std::cout << x.pow(2).requires_grad() << std::endl;

    {
        torch::NoGradGuard no_grad;
        std::cout << x.pow(2).requires_grad() << std::endl;
    }

    // Or by using ``.detach()`` to get a new tensor with the same content but that does
    // not require gradients:
    std::cout << x.requires_grad() << std::endl;
    y = x.detach();
    std::cout << y.requires_grad() << std::endl;
    std::cout << x.eq(y).all().item<bool>() << std::endl;
}

void compute_higher_order_gradients_example() {
    std::cout << "====== Running \"Computing higher-order gradients in C++\" ======" << std::endl;

    // One of the applications of higher-order gradients is calculating gradient penalty.
    // Let's see an example of it using ``torch::autograd::grad``:

    auto model = torch::nn::Linear(4, 3);

    auto input = torch::randn({3, 4}).requires_grad_(true);
    auto output = model(input);

    // Calculate loss
    auto target = torch::randn({3, 3});
    auto loss = torch::nn::MSELoss()(output, target);

    // Use norm of gradients as penalty
    auto grad_output = torch::ones_like(output);
    auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output}, /*create_graph=*/true)[0];
    auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/1) - 1), 2).mean();

    // Add gradient penalty to loss
    auto combined_loss = loss + gradient_penalty;
    combined_loss.backward();

    std::cout << input.grad() << std::endl;
}

// Inherit from Function
class LinearFunction : public Function<LinearFunction> {
public:
    // Note that both forward and backward are static functions

    // bias is an optional argument
    static torch::Tensor forward(
            AutogradContext *ctx, torch::Tensor input, torch::Tensor weight, torch::Tensor bias = torch::Tensor()) {
        ctx->save_for_backward({input, weight, bias});
        auto output = input.mm(weight.t());
        if (bias.defined()) {
            output += bias.unsqueeze(0).expand_as(output);
        }
        return output;
    }

    static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
        auto saved = ctx->get_saved_variables();
        auto input = saved[0];
        auto weight = saved[1];
        auto bias = saved[2];

        auto grad_output = grad_outputs[0];
        auto grad_input = grad_output.mm(weight);
        auto grad_weight = grad_output.t().mm(input);
        auto grad_bias = torch::Tensor();
        if (bias.defined()) {
            grad_bias = grad_output.sum(0);
        }

        return {grad_input, grad_weight, grad_bias};
    }
};

class MulConstant : public Function<MulConstant> {
public:
    static torch::Tensor forward(AutogradContext *ctx, torch::Tensor tensor, double constant) {
        // ctx is a context object that can be used to stash information
        // for backward computation
        ctx->saved_data["constant"] = constant;
        return tensor * constant;
    }

    static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
        // We return as many input gradients as there were arguments.
        // Gradients of non-tensor arguments to forward must be `torch::Tensor()`.
        return {grad_outputs[0] * ctx->saved_data["constant"].toDouble(), torch::Tensor()};
    }
};

void custom_autograd_function_example() {
    std::cout << "====== Running \"Using custom autograd function in C++\" ======" << std::endl;
    {
        auto x = torch::randn({2, 3}).requires_grad_();
        auto weight = torch::randn({4, 3}).requires_grad_();
        auto y = LinearFunction::apply(x, weight);
        y.sum().backward();

        std::cout << x.grad() << std::endl;
        std::cout << weight.grad() << std::endl;
    }
    {
        auto x = torch::randn({2}).requires_grad_();
        auto y = MulConstant::apply(x, 5.5);
        y.sum().backward();

        std::cout << x.grad() << std::endl;
    }
}

void testjit() {
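    // Compile a small TorchScript snippet with torch::jit::compile and run one of its methods.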
    auto module = torch::jit::compile(R"JIT(
          def test_mul(a, b):
            return a * b
          def test_relu(a, b):
            return torch.relu(a + b)+3
          def test_while(a, i):
            while bool(i < 10):
              a += a
              i += 1
            return a
          def test_len(a : List[int]):
            return len(a)
        )JIT");
    auto a = torch::ones(1);
    auto b = torch::ones(1);
    std::cout <<module->run_method("test_relu", a, b).toTensor().item<int64_t>()<<std::endl ;
    std::cout <<module<<std::endl ;

//    ASSERT_EQ(2, module->run_method("test_relu", a, b).toTensor().item<int64_t>());
//
//    ASSERT_TRUE(
//            0x200 == module->run_method("test_while", a, b).toTensor().item<int64_t>());

    at::IValue list = c10::List<int64_t>({3, 4});
//    ASSERT_EQ(2, module->run_method("test_len", list).toInt());


//    at::IValue list = c10::List<int64_t>({3, 4});

}
auto c1udatest(){
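    // Minimal smoke test: create a few requires-grad tensors and print the sum c + b.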
//    std::vector<int64_t> sizes_vec({16, 8, 8});
//    std::vector<int64_t> strides_vec({64, 1, 8});
//    auto tensor_type = TensorType::create(
//            at::kFloat, c10::nullopt, sizes_vec, strides_vec, c10::nullopt);
//    auto options = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
//    auto options = at::TensorOptions().dtype(at::kFloat).device(at::CPU);

    // broadcasting semantic change
//    auto t0 = at::randn({16, 8, 8}, options);
    auto a = torch::randn({1}).requires_grad_();
    auto b = torch::randn({1}).requires_grad_();
    auto c = torch::randn({1}).requires_grad_(true);
//    auto t0 = at::randn({16, 8, 8}, options);

//    torch_wrap::nn::Conv2d model(torch::nn:: Conv2dOptions(3, 5, 3).stride(1).bias(false));
    auto output1=c+b;
    std::cout << "outputoutput"<<output1 << std::endl;



}

auto conv2dtest(){
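    // Run a Conv2d on the GPU, move the result back to the CPU, feed it through a second
    // Conv2d on the CPU, then backprop through the summed output (needs a CUDA device).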
    using namespace std;
    auto options = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
    const at::Device device("cuda");
    const at::Device devicecpu("cpu");
//    at::Tensor op;
    auto input = torch::randn({1,3,4,4},torch::TensorOptions().device(device));
    torch_wrap::nn::Conv2d model(torch::nn:: Conv2dOptions(3, 5, 3).stride(1).bias(false));
    torch_wrap::nn::Conv2d model2(torch::nn:: Conv2dOptions(5, 2, 1).stride(1).bias(false));


    //    auto input=torch::conv2d( torch::nn:: Conv2dOptions(3, 5, 3).stride(1).bias(false));
//    auto modelcuda = model.device(device);
    model->to(device);

//    model2->to(devicecpu);

    auto output1 = model(input);
    std::cout << "output"<<output1 << std::endl;

    auto output11=output1.to(devicecpu);

    auto output = model2(output11);

    std::cout << "output"<<output << std::endl;

    auto result = output;  // copy rather than std::move: output is still used below

    auto outputs = output.sum();
    std::cout << "outputs"<<outputs << std::endl;
    outputs.backward();
    std::cout << output.grad_fn()->name() << std::endl;

}

auto conv222dtest(){
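    // Build output = exp(c^2 + b^3) + 3*(c^2 + b^3), backprop, and print c.grad()
    // next to a hand-computed expression for comparison.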
//    torch::manual_seed(1000);
//    auto aq = torch::rand({10}, torch::requires_grad());
    auto a = torch::rand({2}).requires_grad_();
    auto b = torch::rand({1},torch::requires_grad());
    auto c = torch::rand({1},torch::requires_grad());

//    torch_wrap::nn::Conv2d model(torch::nn:: Conv2dOptions(3, 5, 3).stride(1).bias(false));
    auto output1=a+b;
    auto output=torch::exp( torch::pow(c,2)+torch::pow(b,3))+3*(torch::pow(c,2)+torch::pow(b,3));
    std::cout << "ccc"<<c << std::endl;
    std::cout << "expexpexpexp"<<torch::exp( torch::tensor(1)) << std::endl;

    std::cout << "outputoutput"<<output << std::endl;

//    auto out=output.sum();
    output.backward();
//    std::cout << "bbbb"<<b << std::endl;
//    std::cout << out.grad_fn()->name() << std::endl;

    std::cout << "cgrad"<<c.grad()<< std::endl;
//    std::cout << "outputoutputgrad"<<output.grad()<< std::endl;
    std::cout << "777777777777"<<2*c*torch::exp( torch::pow(c,2))<< std::endl;



}


int main() {
//    testjit();
//    conv2dtest();
//    main2();
    c1udatest();
//    conv222dtest();
//    std::cout << std::boolalpha;
//
//    basic_autograd_operations_example();
//
//    std::cout << "\n";
//
//    compute_higher_order_gradients_example();
//
//    std::cout << "\n";
//
//    custom_autograd_function_example();
}





The following conversion needs to be completed, and the result placed under pytorch/torch/lib (the CMakeLists.txt used is shown below):

cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(example-app)
find_package(Torch PATHS /opt/share1/xx/pytorch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
add_executable(example-app example-app.cpp)

set(FFmpeg_LIBS_DIRS "/opt/share1/XX/pytorch/torch/lib")
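# note: despite the FFmpeg_* names, these globs collect the libtorch .a/.so files from torch/lib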
file(GLOB FFmpeg_LIBS_a ${FFmpeg_LIBS_DIRS}/*.a)
file(GLOB FFmpeg_LIBS_so ${FFmpeg_LIBS_DIRS}/*.so)

#set(Torch_LIBS_DIRS "/XX/pytorch/build/lib.linux-x86_64-3.7/torch")
#file(GLOB torch_LIBS_a ${Torch_LIBS_DIRS}/*.a)
#file(GLOB torch_LIBS_so ${Torch_LIBS_DIRS}/*.so)


include_directories("/XX/pytorch/torch/include")
include_directories("/XX/pytorch/build/lib.linux-x86_64-3.7/torch/include/torch/csrc/api/include")
#target_link_libraries(example-app ${TORCH_LIBRARIES})
target_link_libraries(example-app ${FFmpeg_LIBS_a} ${FFmpeg_LIBS_so})

set_property(TARGET example-app PROPERTY CXX_STANDARD 14)

References:

  • https://www.cnblogs.com/lidabo/p/7359422.html
  • https://blog.csdn.net/qq_33236581/article/details/111692448
  • https://blog.51cto.com/11496263/1789853