Building a CNN with the libtorch C++ API

This post shows how to build a small fully convolutional network (CNN) with the Libtorch library. First, a simple CNN model with seven convolutional layers is defined. Then the network is trained by instantiating it, preparing training data, setting up an Adam optimizer, and running the forward pass, loss computation, backward pass, and optimizer step. During training the loss steadily decreases, indicating that the model is converging.

Contents

1. Main program steps

2. Building the CNN


1. Main program steps

(1) Instantiate the network; the model class must inherit from torch::nn::Module.

(2) Prepare the training data; both the input tensor and the target tensor have shape (b, c, h, w).

(3) Instantiate the optimizer; Adam is used here with a learning rate of 0.0003.

(4) Run forward, compute mse_loss, then backward and step.

#include <opencv2/opencv.hpp>
#include <CNN.h>
#include <iostream>
#include <string>

int main(int argc, char* argv[])
{
    //CNN part
    auto cnn = plainCNN(3, 1);  // 1. Network: a simple CNN that takes a 3-channel input and outputs a 1-channel prediction map
    auto cnn_input = torch::randint(255, { 1,3,224,224 });  // 2.1 Input (b,c,h,w): a randomly generated input batch tensor
    auto cnn_target = torch::zeros({ 1,1,26,26 });  // 2.2 Target (b,c,h,w)
    torch::optim::Adam optimizer_cnn(cnn.parameters(), 0.0003);  // 3. Optimizer: Adam
    for (int i = 0; i < 30; i++) {
        optimizer_cnn.zero_grad();
        auto out = cnn.forward(cnn_input);  // 4. forward
        auto loss = torch::mse_loss(out, cnn_target);  // 5. loss
        loss.backward();
        optimizer_cnn.step();
        std::cout << std::to_string(i) << ":" << loss.data() << std::endl;
    }
}
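Why is the target 26×26? The three stride-2 layers halve the 224×224 input three times (224 → 112 → 56 → 28), and the final 3×3 convolution with no padding trims this to 28 − 3 + 1 = 26. A minimal shape check, assuming the plainCNN built in section 2:

#include <CNN.h>
#include <iostream>

int main() {
    plainCNN cnn(3, 1);
    auto y = cnn.forward(torch::randn({ 1,3,224,224 }));
    std::cout << y.sizes() << std::endl;  // prints [1, 1, 26, 26]
}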

2. Building the CNN

The fully convolutional network class plainCNN must inherit from torch::nn::Module. The corresponding header declares seven convolutional layers (three of which perform downsampling).

cnn.h

#ifndef CNN_H
#define CNN_H

#include "BaseModule.h"

class plainCNN : public torch::nn::Module{
public:
    plainCNN(int in_channels, int out_channels);  // constructor; the only parameters are the input and output channel counts
    torch::Tensor forward(torch::Tensor x);  // inference; the input is a (b,c,h,w) tensor
private:
    int mid_channels[3] = {32,64,128};
    ConvReluBn conv1{nullptr};  // three (ConvReluBn + downsample) pairs
    ConvReluBn down1{nullptr};
    ConvReluBn conv2{nullptr};
    ConvReluBn down2{nullptr};
    ConvReluBn conv3{nullptr};
    ConvReluBn down3{nullptr};
    torch::nn::Conv2d out_conv{nullptr};  // plain 2D convolution producing the output map
};

#endif // CNN_H

The ConvReluBn class used above also inherits from torch::nn::Module.

// Declaration: conv + relu + bn
class ConvReluBnImpl : public torch::nn::Module {
public:
    ConvReluBnImpl(int input_channel=3, int output_channel=64, int kernel_size = 3, int stride = 1);
    torch::Tensor forward(torch::Tensor x);
private:
    // Declare layers
    torch::nn::Conv2d conv{ nullptr };
    torch::nn::BatchNorm2d bn{ nullptr };
};
TORCH_MODULE(ConvReluBn);

// Definition: conv + relu + bn
// The conv uses padding = kernel_size/2, which preserves spatial size for odd kernels at stride 1
ConvReluBnImpl::ConvReluBnImpl(int input_channel, int output_channel, int kernel_size, int stride) {
    conv = register_module("conv", torch::nn::Conv2d(conv_options(input_channel,output_channel,kernel_size,stride,kernel_size/2)));
    bn = register_module("bn", torch::nn::BatchNorm2d(output_channel));
}
// conv -> relu -> bn; this could also be changed to conv -> bn -> relu
torch::Tensor ConvReluBnImpl::forward(torch::Tensor x) {
    x = torch::relu(conv->forward(x));
    x = bn(x);
    return x;
}
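The TORCH_MODULE(ConvReluBn) macro generates a module holder named ConvReluBn that wraps a std::shared_ptr<ConvReluBnImpl>; that is why the members in plainCNN are initialized with {nullptr} and called through ->. A minimal usage sketch:

#include "BaseModule.h"
#include <iostream>

int main() {
    ConvReluBn block(3, 32);  // the holder constructs the Impl: in=3, out=32, kernel_size=3, stride=1 (defaults)
    auto out = block->forward(torch::randn({ 1,3,64,64 }));
    std::cout << out.sizes() << std::endl;  // [1, 32, 64, 64]: stride 1 with padding = kernel_size/2 keeps h and w
}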

cnn.cpp

#include<CNN.h>

plainCNN::plainCNN(int in_channels, int out_channels){
    conv1 = ConvReluBn(in_channels,mid_channels[0],3);  // in_c, out_c, kernel_size (stride defaults to 1)
    down1 = ConvReluBn(mid_channels[0],mid_channels[0],3,2);  // stride = 2
    conv2 = ConvReluBn(mid_channels[0],mid_channels[1],3);
    down2 = ConvReluBn(mid_channels[1],mid_channels[1],3,2);  // stride = 2
    conv3 = ConvReluBn(mid_channels[1],mid_channels[2],3);
    down3 = ConvReluBn(mid_channels[2],mid_channels[2],3,2);  // stride = 2
    out_conv = torch::nn::Conv2d(conv_options(mid_channels[2],out_channels,3));  // note: a plain 2D convolution, constructed directly

    conv1 = register_module("conv1",conv1);  // register each submodule and keep the returned holder
    down1 = register_module("down1",down1);
    conv2 = register_module("conv2",conv2);
    down2 = register_module("down2",down2);
    conv3 = register_module("conv3",conv3);
    down3 = register_module("down3",down3);
    out_conv = register_module("out_conv",out_conv);
}

torch::Tensor plainCNN::forward(torch::Tensor x){
    x = conv1->forward(x);  // forward through each layer in turn; each is a module with its own forward function
    x = down1->forward(x);
    x = conv2->forward(x);
    x = down2->forward(x);
    x = conv3->forward(x);
    x = down3->forward(x);
    x = out_conv->forward(x);
    return x;
}
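The register_module calls in the constructor matter: Module::parameters() only collects parameters of registered submodules, so without them cnn.parameters() in main() would be empty and the Adam optimizer would have nothing to update. A quick sanity check, as a sketch:

#include <CNN.h>
#include <iostream>

int main() {
    plainCNN cnn(3, 1);
    std::cout << cnn.parameters().size() << std::endl;  // non-zero only because every layer was registered
}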

BaseModule.h

#pragma once
#ifndef BASEMODULE_H
#define BASEMODULE_H

#include <torch/torch.h>
#include <torch/script.h>

inline torch::nn::Conv2dOptions conv_options(int64_t in_planes, int64_t out_planes, int64_t kernel_size,
    int64_t stride = 1, int64_t padding = 0, bool with_bias = false) {
    torch::nn::Conv2dOptions conv_options = torch::nn::Conv2dOptions(in_planes, out_planes, kernel_size);
    conv_options.stride(stride);
    conv_options.padding(padding);
    conv_options.bias(with_bias);
    return conv_options;
}

class ConvReluBnImpl : public torch::nn::Module {
public:
    ConvReluBnImpl(int input_channel=3, int output_channel=64, int kernel_size = 3, int stride = 1);
    torch::Tensor forward(torch::Tensor x);
private:
    // Declare layers
    torch::nn::Conv2d conv{ nullptr };
    torch::nn::BatchNorm2d bn{ nullptr };
};
TORCH_MODULE(ConvReluBn);

class LinearBnReluImpl : public torch::nn::Module{
public:
    LinearBnReluImpl(int in_features, int out_features);
    torch::Tensor forward(torch::Tensor x);
private:
    //layers
    torch::nn::Linear ln{nullptr};
    torch::nn::BatchNorm1d bn{nullptr};
};
TORCH_MODULE(LinearBnRelu);

#endif // BASEMODULE_H
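conv_options simply bundles the common Conv2dOptions settings; bias defaults to false because every convolution here is followed by batch norm, which absorbs a constant offset. A small usage sketch:

#include "BaseModule.h"
#include <iostream>

int main() {
    // a 3x3 convolution, 32 -> 64 channels, stride 2, padding 1, no bias
    torch::nn::Conv2d down(conv_options(32, 64, 3, 2, 1));
    std::cout << down->weight.sizes() << std::endl;  // [64, 32, 3, 3]
}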

BaseModule.cpp

#include "BaseModule.h"

ConvReluBnImpl::ConvReluBnImpl(int input_channel, int output_channel, int kernel_size, int stride) {
    conv = register_module("conv", torch::nn::Conv2d(conv_options(input_channel,output_channel,kernel_size,stride,kernel_size/2)));
    bn = register_module("bn", torch::nn::BatchNorm2d(output_channel));
}

torch::Tensor ConvReluBnImpl::forward(torch::Tensor x) {
    x = torch::relu(conv->forward(x));
    x = bn(x);
    return x;
}

LinearBnReluImpl::LinearBnReluImpl(int in_features, int out_features){
    ln = register_module("ln", torch::nn::Linear(torch::nn::LinearOptions(in_features, out_features)));
    bn = register_module("bn", torch::nn::BatchNorm1d(out_features));
}

torch::Tensor LinearBnReluImpl::forward(torch::Tensor x){
    x = torch::relu(ln->forward(x));
    x = bn(x);
    return x;
}
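LinearBnRelu is the fully connected counterpart of ConvReluBn; plainCNN does not use it, but BaseModule.h keeps it as a reusable building block. A minimal usage sketch:

#include "BaseModule.h"
#include <iostream>

int main() {
    LinearBnRelu fc(128, 10);
    auto logits = fc->forward(torch::randn({ 4,128 }));  // batch of 4; BatchNorm1d needs batch > 1 in training mode
    std::cout << logits.sizes() << std::endl;  // [4, 10]
}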

Over the 30 iterations the printed loss steadily decreases, i.e. the network converges.

Reference: LibtorchTutorials/lesson3-BasicModels at main · AllentDan/LibtorchTutorials · GitHub
