MNN Framework Source Code Walkthrough -- The Convert Module

The Convert module: model conversion
1. Building the converter

cd MNN/
./schema/generate.sh
mkdir build
cd build
cmake .. -DMNN_BUILD_CONVERTER=true && make -j4

2. Conversion commands
Enter the build directory; you will find the MNNConvert executable that was just built.

TensorFlow:
./MNNConvert -f TF --modelFile XXX.pb --MNNModel XXX.mnn --bizCode biz
Caffe:
./MNNConvert -f CAFFE --modelFile XXX.caffemodel --prototxt XXX.prototxt --MNNModel XXX.mnn --bizCode biz
ONNX:
./MNNConvert -f ONNX --modelFile XXX.onnx --MNNModel XXX.mnn --bizCode biz

3. Source code walkthrough
The code lives under the tools/converter directory; the entry point is in MNNConvert.cpp and splits into two major steps:
a. parse the original model and convert it into an MNN model
b. optimize the MNN model

int main(int argc, char *argv[]) {
    modelConfig modelPath;

    // parse the command-line args
    try {
        Cli::initializeMNNConvertArgs(modelPath, argc, argv);
        Cli::printProjectBanner();

        Global<modelConfig>::Reset(&modelPath);
        auto options = common::BuildOptions(modelPath.compressionParamsFile);

        std::cout << "Start to Convert Other Model Format To MNN Model..." << std::endl;
        std::unique_ptr<MNN::NetT> netT = std::unique_ptr<MNN::NetT>(new MNN::NetT());
        // dispatch to a different converter depending on the source model format
        if (modelPath.model == modelConfig::CAFFE) {
            caffe2MNNNet(modelPath.prototxtFile, modelPath.modelFile, modelPath.bizCode, options, netT);
        } else if (modelPath.model == modelConfig::TENSORFLOW) {
            tensorflow2MNNNet(modelPath.modelFile, modelPath.bizCode, options, netT);
        } else if (modelPath.model == modelConfig::MNN) {
            addBizCode(modelPath.modelFile, modelPath.bizCode, options, netT);
        } else if (modelPath.model == modelConfig::ONNX) {
            onnx2MNNNet(modelPath.modelFile, modelPath.bizCode, options, netT);
        } else if (modelPath.model == modelConfig::TFLITE) {
            tflite2MNNNet(modelPath.modelFile, modelPath.bizCode, options, netT);
#ifdef MNN_BUILD_TORCHSCRIPT
        } else if (modelPath.model == modelConfig::TORCHSCRIPT) {
            torchscript2MNNNet(modelPath.modelFile, modelPath.bizCode, options, netT);
#endif
        } else {
            std::cout << "Not Support Model Type" << std::endl;
        }

        if (modelPath.model != modelConfig::MNN) {
            std::cout << "Start to Optimize the MNN Net..." << std::endl;
            // optimize the converted MNN model
            std::unique_ptr<MNN::NetT> newNet = optimizeNet(netT, modelPath.forTraining);
            writeFb(newNet, modelPath.MNNModel, modelPath);
        } else {
            writeFb(netT, modelPath.MNNModel, modelPath);
        }
    } catch (const cxxopts::OptionException &e) {
        std::cerr << "Error while parsing options! " << std::endl;
        std::cerr << e.what() << std::endl;
        exit(EXIT_FAILURE);
    } catch (const std::runtime_error &e) {
        std::cerr << "Error while converting the model! " << std::endl;
        std::cerr << e.what() << std::endl;
        exit(EXIT_FAILURE);
    }
    std::cout << "Converted Done!" << std::endl;

    return 0;
}

a. Parsing the original model and converting it to MNN
The MNN model format is defined in the FlatBuffers schema language; the C++ generated from that schema looks like this:

// the deserialized (object-API) network, Net
struct NetT : public flatbuffers::NativeTable {
  typedef Net TableType;
  std::string bizCode;
  std::vector<std::unique_ptr<TensorDescribeT>> extraTensorDescribe;
  std::unique_ptr<GpuLibraryT> gpulibrary;
  std::vector<std::unique_ptr<OpT>> oplists;
  std::vector<std::string> outputName;
  ForwardType preferForwardType;
  NetSource sourceType;
  std::vector<std::string> tensorName;
  int32_t tensorNumber;
  Usage usage;
  std::vector<std::unique_ptr<SubGraphProtoT>> subgraphs;
  NetT()
      : preferForwardType(ForwardType_CPU),
        sourceType(NetSource_CAFFE),
        tensorNumber(0),
        usage(Usage_INFERENCE) {
  }
};

// the deserialized operator, OpT (also an object-API NativeTable)
struct OpT : public flatbuffers::NativeTable {
  typedef Op TableType;
  std::vector<int32_t> inputIndexes;
  OpParameterUnion main;
  std::string name;
  std::vector<int32_t> outputIndexes;
  OpType type;
  MNN_DATA_FORMAT defaultDimentionFormat;
  OpT()
      : type(OpType_AbsVal),
        defaultDimentionFormat(MNN_DATA_FORMAT_NHWC) {
  }
};
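
NetT and OpT are the FlatBuffers "object API" (NativeTable) forms, which is why writeFb can simply pack the object tree back into a flat binary buffer at the end of conversion. A minimal sketch of that packing step, assuming the generated MNN_generated.h is on the include path (the helper name and file handling here are illustrative, not the converter's actual code):

#include <fstream>
#include <string>
#include "flatbuffers/flatbuffers.h"
#include "MNN_generated.h" // produced by schema/generate.sh

// Serialize an object-API NetT into a flat binary .mnn buffer on disk.
void saveNet(const MNN::NetT* netT, const std::string& path) {
    flatbuffers::FlatBufferBuilder builder(1024);
    // Net::Pack walks the NativeTable tree and emits the flat tables.
    builder.Finish(MNN::Net::Pack(builder, netT));
    std::ofstream out(path, std::ofstream::binary);
    out.write(reinterpret_cast<const char*>(builder.GetBufferPointer()),
              builder.GetSize());
}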

MNN accepts Caffe, TensorFlow, ONNX and TFLite models as input. The first three all rely on the protobuf library for serialization and storage. Below we take the Caffe conversion as a deep-dive example: Caffe's model structure is defined in protobuf syntax (see the file caffe.proto), organized hierarchically into NetParameter and LayerParameter.
The entry point for Caffe model conversion is in caffeConverter.cpp:

// takes the caffe model definition and weight files, fills in the not-yet-serialized MNN network NetT
int caffe2MNNNet(const std::string prototxtFile, const std::string modelFile, const std::string bizCode,
                 const common::Options& options, std::unique_ptr<MNN::NetT>& netT) {
    caffe::NetParameter caffeProtxt;
    caffe::NetParameter caffeModel;
    // read the model definition file and the weight file through protobuf's interfaces
    bool succ = read_proto_from_text(prototxtFile.c_str(), &caffeProtxt);
    DCHECK(succ) << "read_proto_from_text failed";

    succ = read_proto_from_binary(modelFile.c_str(), &caffeModel);
    DCHECK(succ) << "read_proto_from_binary failed";
    std::map<std::string, int> tensorName; // maps each tensor's name to its index in the whole graph

    // Load Parameters
    // MNN::NetT netT;
    // Add Extra Input
    // TODO Support shape
    // parse the input nodes first
    if (caffeProtxt.input_size() > 0) {
        for (int v = 0; v < caffeProtxt.input_size(); ++v) {
            if (caffeProtxt.input_dim_size() <= 0) {
                continue;
            }
            MNN::OpT* op  = new MNN::OpT;  // create an MNN input node
            op->name      = caffeProtxt.input(v);
            op->type      = MNN::OpType_Input;
            op->main.type = MNN::OpParameter_Input;
            auto inputT   = new MNN::InputT;
            for (int i = 0; i < caffeProtxt.input_dim_size(); ++i) {
                inputT->dims.push_back(caffeProtxt.input_dim(i));
            }
            op->main.value = inputT;
            op->outputIndexes.push_back(v);

            netT->oplists.emplace_back(op);
            netT->tensorName.push_back(op->name);
            tensorName.insert(std::make_pair(op->name, v));
        }
    }
    if (caffeProtxt.input_shape_size() > 0) {
        for (int v = 0; v < caffeProtxt.input_shape_size(); ++v) {
            MNN::OpT* op  = new MNN::OpT;
            op->name      = caffeProtxt.input(v);
            op->type      = MNN::OpType_Input;
            op->main.type = MNN::OpParameter_Input;
            auto inputT   = new MNN::InputT;
            auto shape    = caffeProtxt.input_shape(v);
            for (int i = 0; i < shape.dim_size(); ++i) {
                inputT->dims.push_back(shape.dim(i));
            }
            op->main.value = inputT;
            op->outputIndexes.push_back(v);

            netT->oplists.emplace_back(op);
            netT->tensorName.push_back(op->name);
            tensorName.insert(std::make_pair(op->name, v));
        }
    }

    // Compute TensorCount
    {
        // record the name of every layer's output tensors in the net
        for (int l = 0; l < caffeProtxt.layer_size(); ++l) {
            auto& layer = caffeProtxt.layer(l);
            for (int t = 0; t < layer.top_size(); ++t) {
                auto name = layer.top(t);
                if (tensorName.find(name) == tensorName.end()) {
                    tensorName.insert(std::make_pair(layer.top(t), tensorName.size()));
                    netT->tensorName.push_back(name);
                }
            }
        }
    }
	
    // parse the parameters of every caffe layer, one by one
    for (int l = 0; l < caffeProtxt.layer_size(); ++l) {
        MNN::OpT* op = new MNN::OpT;
        auto& layer  = caffeProtxt.layer(l);
        op->name     = layer.name();
        // Input Output
        for (int t = 0; t < layer.top_size(); ++t) {
            op->outputIndexes.emplace_back(tensorName.find(layer.top(t))->second);
        }

        for (int t = 0; t < layer.bottom_size(); ++t) {
            op->inputIndexes.emplace_back(tensorName.find(layer.bottom(t))->second);
        }
        // look up the registered converter for this layer type
        auto creator = OpConverterSuit::get()->search(layer.type());
        if (nullptr == creator) {
            LG << "Don't support type [ " << layer.type().c_str() << " ], for " << layer.name().c_str();
            delete op;
            break;
        }
        const caffe::LayerParameter* layerP = nullptr;
        for (int v = 0; v < caffeModel.layer_size(); ++v) {
            auto& l = caffeModel.layer(v); // caffe weights
            if (l.name() == layer.name()) {
                layerP = &l;
                break;
            }
        }
        if (NULL == layerP) {
            layerP = &layer;
        }
        op->type      = creator->opType();
        op->main.type = creator->type();
        // the converter copies the original layer's data into the MNN op
        creator->run(op, layer, *layerP);

        netT->oplists.emplace_back(op);
    }
    netT->sourceType = MNN::NetSource_CAFFE;
    netT->bizCode    = bizCode;

    return 0;
}
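
The read_proto_from_text / read_proto_from_binary helpers wrap protobuf's two parsers: TextFormat for the human-readable .prototxt and the binary wire parser for the .caffemodel. A minimal sketch of what they boil down to, assuming only standard protobuf APIs (real implementations usually also raise protobuf's default message-size limit for large weight files):

#include <fcntl.h>
#include <unistd.h>
#include <fstream>
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>

// Parse a human-readable .prototxt into a protobuf message.
bool read_proto_from_text(const char* file, google::protobuf::Message* proto) {
    int fd = open(file, O_RDONLY);
    if (fd < 0) return false;
    google::protobuf::io::FileInputStream input(fd);
    bool ok = google::protobuf::TextFormat::Parse(&input, proto);
    close(fd);
    return ok;
}

// Parse a binary .caffemodel into a protobuf message.
bool read_proto_from_binary(const char* file, google::protobuf::Message* proto) {
    std::ifstream input(file, std::ifstream::binary);
    return input.good() && proto->ParseFromIstream(&input);
}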

Caffe ops are converted to MNN ops through a factory registration mechanism:

// template class that registers an op converter
template <class T>
class OpConverterRegister {
public:
    OpConverterRegister(const char* claim) {
        T* test             = new T;
        OpConverterSuit* ts = OpConverterSuit::get();
        ts->insert(test, claim);
    }
    ~OpConverterRegister() {
    }
};
// manager for the op converters; its core member is a map from layer-type name to converter
class OpConverterSuit {
public:
    static OpConverterSuit* get();
    void insert(OpConverter* t, const char* name);

    OpConverter* search(const std::string& name);

    OpConverterSuit() {
    }
    ~OpConverterSuit();

private:
    static OpConverterSuit* global;
    std::map<std::string, OpConverter*> mTests;
};
// base class for op converters; the key functions are run, type and opType. Each op's converter derives from this base and implements those three.
class OpConverter {
    friend class OpConverterSuit;

public:
    virtual void run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight) = 0;
    virtual MNN::OpParameter type()                                                                                 = 0;
    virtual MNN::OpType opType()                                                                                    = 0;
    OpConverter() {
    }
    virtual ~OpConverter() {
    }

private:
};
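
The suit is a lazily-created singleton around that map; a minimal sketch of what get/insert/search plausibly look like (the actual definitions live alongside the class in the converter sources):

OpConverterSuit* OpConverterSuit::global = nullptr;

OpConverterSuit* OpConverterSuit::get() {
    if (nullptr == global) {
        global = new OpConverterSuit; // created on first use, during static initialization
    }
    return global;
}

void OpConverterSuit::insert(OpConverter* t, const char* name) {
    mTests.insert(std::make_pair(name, t)); // e.g. "Convolution" -> Convolution converter
}

OpConverter* OpConverterSuit::search(const std::string& name) {
    auto iter = mTests.find(name);
    return iter == mTests.end() ? nullptr : iter->second;
}

Because each OpConverterRegister is instantiated as a file-scope static, every converter registers itself during static initialization, so the map is fully populated by the time caffe2MNNNet calls search().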

// where the Convolution layer's converter is registered
static OpConverterRegister<Convolution> a("Convolution");
// the Convolution layer's conversion
class Convolution : public ConvolutionCommon {
public:
    virtual void run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters, const caffe::LayerParameter& weight) {
        ConvolutionCommon::run(dstOp, parameters, weight);
        auto weightBlob = weight.blobs(0);

        auto convolution2D = dstOp->main.AsConvolution2D();
        int size           = 1;
        if (weightBlob.has_shape()) {
            for (int i = 0; i < weightBlob.shape().dim_size(); ++i) {
                size *= weightBlob.shape().dim(i);
            }
        } else {
            size = weightBlob.num() * weightBlob.channels() * weightBlob.height() * weightBlob.width();
        }

        std::vector<float> weightData;
        weightData.resize(size);
        for (int i = 0; i < size; ++i) {
            weightData[i] = weightBlob.data(i);
        }
        convolution2D->weight = weightData;

        auto& convProto = parameters.convolution_param();
        std::vector<float> biasData(convProto.num_output(), 0.0f);
        if (convProto.bias_term() && weight.blobs_size() >= 2) {
            for (int i = 0; i < biasData.size(); ++i) {
                biasData[i] = weight.blobs(1).data(i);
            }
        }
        convolution2D->bias = biasData;
    }
};
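
Supporting a new Caffe layer therefore needs only the three virtuals plus one registration line. A hypothetical converter for a layer type "MyScale" (the layer name and the Scale parameter choice are illustrative, not part of MNN's converter):

class MyScale : public OpConverter {
public:
    // copy the caffe layer's data into the corresponding MNN op
    virtual void run(MNN::OpT* dstOp, const caffe::LayerParameter& parameters,
                     const caffe::LayerParameter& weight) {
        auto scale = new MNN::ScaleT;
        // ... fill scale->scaleData / scale->biasData from weight.blobs() ...
        dstOp->main.value = scale;
    }
    virtual MNN::OpParameter type() { return MNN::OpParameter_Scale; }
    virtual MNN::OpType opType()    { return MNN::OpType_Scale; }
};
// file-scope static: registered during static initialization, like "Convolution" above
static OpConverterRegister<MyScale> _myScale("MyScale");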

b. Model optimization
The entry point for MNN model optimization is in PostConverter.cpp:

std::unique_ptr<MNN::NetT> optimizeNet(std::unique_ptr<MNN::NetT>& originNet, bool forTraining) {
    // TensorFlow models first have subgraphs extracted from the main graph
    if (originNet->sourceType == NetSource_TENSORFLOW) {
        GenerateSubGraph(originNet);
    }
    std::vector<MNN::SubGraphProtoT*> subgraphs;
    for (auto& subgraph : originNet->subgraphs) {
        subgraphs.push_back(subgraph.get());
    }
    // gather the optimization context: subgraphs, training flag, source format
    OptimizeContext ctx;
    ctx.subgraphs = subgraphs;
    ctx.is_training = forTraining;
    ctx.verbose = true;
    ctx.source = originNet->sourceType;
    ctx.completed_subgraphs = {};
    ctx.RunOptimize = optimizeNetImpl;

    Global<OptimizeContext>::Reset(&ctx);

    // optimize every subgraph first, then the main graph
    std::unordered_map<std::string, VARP> empty;
    for (auto& subGraph : originNet->subgraphs) {
        CompleteSubGraph(empty, subGraph.get());
    }
    std::unique_ptr<MNN::NetT> net = ctx.RunOptimize(originNet, empty);
    // fold constants into the subgraphs that reference them, then re-attach the subgraphs
    fuseConstIntoSubgraph(net.get(), ctx.completed_subgraphs);
    for (auto* subgraph : ctx.completed_subgraphs) {
        net->subgraphs.emplace_back(subgraph);
    }
    return std::move(net);
}
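
After writeFb flushes the optimized NetT to disk, the .mnn file is ready for the inference runtime. A minimal sketch of loading it with MNN's Interpreter API (default CPU schedule; the file name is a placeholder):

#include <memory>
#include <MNN/Interpreter.hpp>

int main() {
    // load the flat buffer produced by the converter
    std::shared_ptr<MNN::Interpreter> net(MNN::Interpreter::createFromFile("XXX.mnn"));
    MNN::ScheduleConfig config; // defaults to the CPU backend
    auto session = net->createSession(config);
    auto input   = net->getSessionInput(session, nullptr); // nullptr: the first input
    // ... fill input->host<float>() with data ...
    net->runSession(session);
    return 0;
}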