4. BatchNorm and NonLinearity Implementation

This post continues the series on implementing CRN layers for a convolutional neural network (CNN) with the C++ Eigen library. Following the transposed convolution (TransposedConv2d) operation, including the dilated variant, it walks through the implementation of BatchNorm2d and Nonlinearity layers (Sigmoid, Tanh, ReLU, etc.), with the corresponding header and source code examples.

[C++: CRN Forward Inference with the Eigen Library]

Part 3: TransposedConv2d Implementation (with dilation)

1. Implementing BatchNorm2d with Eigen
1.1 Layer_BatchNorm2d.h
//
// Created by Koer on 2022/10/31.
//

#ifndef CRN_LAYER_BATCHNORM2D_H
#define CRN_LAYER_BATCHNORM2D_H


#include "Eigen"
#include "mat.h"
#include "Eigen/CXX11/Tensor"

class Layer_BatchNorm2d {
public:
    Layer_BatchNorm2d();

    Layer_BatchNorm2d(int64_t bn_ch);

    void LoadState(MATFile *pmFile, const std::string &state_preffix);

    void LoadTestState();

    Eigen::Tensor<float_t, 4> forward(Eigen::Tensor<float_t, 4> &input);

private:
    int64_t channels;
    Eigen::Tensor<float_t, 2> weights;
    Eigen::Tensor<float_t, 2> bias;
    Eigen::Tensor<float_t, 2> running_mean;
    Eigen::Tensor<float_t, 2> running_var;
    int32_t num_batches_tracked;


};


#endif //CRN_LAYER_BATCHNORM2D_H
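
At inference time, BatchNorm2d is just a per-channel affine transform using the tracked statistics:

    y = weight * (x - running_mean) / sqrt(running_var + eps) + bias

with eps = 1e-5 by default in PyTorch. num_batches_tracked is only updated during training, so it plays no role in the forward pass implemented here.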

1.2 Layer_BatchNorm2d.cpp
//
// Created by Koer on 2022/10/31.
//
#include "iostream"
#include "../include/Layer_BatchNorm2d.h"

Layer_BatchNorm2d::Layer_BatchNorm2d() {
    this->channels = 1;
}

Layer_BatchNorm2d::Layer_BatchNorm2d(int64_t bn_ch) {
    this->channels = bn_ch;
}

void Layer_BatchNorm2d::LoadState(MATFile *pmFile, const std::string &state_preffix) {
    std::string weight_name = state_preffix + "_weight";
    std::string bias_name = state_preffix + "_bias";
    std::string rm_name = state_preffix + "_running_mean";
    std::string rv_name = state_preffix + "_running_var";
    std::string nbt_name = state_preffix + "_num_batches_tracked";

    mxArray *pa = matGetVariable(pmFile, weight_name.c_str());
    auto *values = (float_t *) mxGetData(pa);
    long long dim1 = mxGetM(pa);
    long long dim2 = mxGetN(pa);
    this->weights.resize(dim1, dim2);
    int idx = 0;
    for (int i = 0; i < dim2; i++) {
        for (int j = 0; j < dim1; j++) {
            this->weights(j, i) = values[idx++];
        }
    }

    // bias, running_mean and running_var are stored the same way in the
    // .mat file, so they can be loaded with the same column-major copy.
    auto load2d = [pmFile](const std::string &name, Eigen::Tensor<float_t, 2> &dst) {
        mxArray *p = matGetVariable(pmFile, name.c_str());
        auto *v = (float_t *) mxGetData(p);
        long long d1 = mxGetM(p);
        long long d2 = mxGetN(p);
        dst.resize(d1, d2);
        int k = 0;
        for (int i = 0; i < d2; i++) {
            for (int j = 0; j < d1; j++) {
                dst(j, i) = v[k++];
            }
        }
    };
    load2d(bias_name, this->bias);
    load2d(rm_name, this->running_mean);
    load2d(rv_name, this->running_var);
    // num_batches_tracked is only updated during training, so it is not
    // needed for forward inference and is left unloaded here.
}
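
The header also declares a forward method. Below is a minimal inference-time sketch, assuming the (N, C, H, W) input layout used by the conv layers in this series and that the loaded parameters are 1 x C tensors; the eps value matches the PyTorch default. It belongs in Layer_BatchNorm2d.cpp.

#include <cmath>  // std::sqrt, add at the top of Layer_BatchNorm2d.cpp

Eigen::Tensor<float_t, 4> Layer_BatchNorm2d::forward(Eigen::Tensor<float_t, 4> &input) {
    const Eigen::Index N = input.dimension(0);
    const Eigen::Index C = input.dimension(1);
    const Eigen::Index H = input.dimension(2);
    const Eigen::Index W = input.dimension(3);
    const float_t eps = 1e-5f;  // PyTorch default
    Eigen::Tensor<float_t, 4> output(N, C, H, W);
    for (Eigen::Index c = 0; c < C; c++) {
        // Fold the per-channel statistics into one scale and one shift:
        // y = weight * (x - running_mean) / sqrt(running_var + eps) + bias
        float_t scale = this->weights(0, c) / std::sqrt(this->running_var(0, c) + eps);
        float_t shift = this->bias(0, c) - scale * this->running_mean(0, c);
        for (Eigen::Index n = 0; n < N; n++) {
            for (Eigen::Index h = 0; h < H; h++) {
                for (Eigen::Index w = 0; w < W; w++) {
                    output(n, c, h, w) = input(n, c, h, w) * scale + shift;
                }
            }
        }
    }
    return output;
}

LoadState is then called with a handle obtained from matOpen, for example layer.LoadState(matOpen("crn.mat", "r"), "bn1") (the file and variable names here are placeholders for illustration).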
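2. Implementing Nonlinearity with Eigen

The activation layers (Sigmoid, Tanh, ReLU) are purely elementwise, so they map directly onto Eigen's coefficient-wise tensor operations. Below is a minimal header-only sketch; the class name Layer_Nonlinearity, the Activation enum, and the single-class design are assumptions made here to mirror the layer classes above, not a fixed interface.

#ifndef CRN_LAYER_NONLINEARITY_H
#define CRN_LAYER_NONLINEARITY_H

#include <cmath>
#include "Eigen"
#include "Eigen/CXX11/Tensor"

// Which elementwise activation this layer applies (hypothetical enum).
enum class Activation { ReLU, Sigmoid, Tanh };

class Layer_Nonlinearity {
public:
    explicit Layer_Nonlinearity(Activation act) : act(act) {}

    Eigen::Tensor<float_t, 4> forward(Eigen::Tensor<float_t, 4> &input) {
        switch (this->act) {
            case Activation::ReLU:
                // max(x, 0), elementwise
                return input.cwiseMax(input.constant(0.0f));
            case Activation::Sigmoid:
                // 1 / (1 + exp(-x)), elementwise
                return input.sigmoid();
            case Activation::Tanh:
                return input.tanh();
        }
        return input;
    }

private:
    Activation act;
};

#endif //CRN_LAYER_NONLINEARITY_H

Because these layers hold no state, a single object can be reused anywhere in the network, e.g. output = relu.forward(bn_out);.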