解析.caffemodel生成.prototxt文件(!!!保证成功)

主要分两大步

复现经典工作,其源代码是caffe的,需要写成pytorch代码。已有.caffemodel文件,需首先将其解析出.prototxt文件,然后写成pytorch框架的。查了许多资料,要不写的有点乱,要不就是代码有错误。这里从Linux安装caffe开始,总结走过的路。执行过已成功!!!

第一步:安装caffe

这一步是参考的卡伊德的第二步"大杀器"
https://blog.csdn.net/chenzhiwen1998/article/details/119753195

第二步:快速解析.caffemodel文件

代码如下:(直接复制粘贴,只需改动两行——文件名)
代码来自https://blog.csdn.net/mdjxy63/article/details/81223217,但有错误,我debug后贴在下面。

# coding:utf-8
# Parse a .caffemodel binary and regenerate an equivalent .prototxt net definition.
#
# Prerequisite: caffe's python bindings are built and <caffe_root>/python is on
# PYTHONPATH (or appended to sys.path before importing).

import caffe.proto.caffe_pb2 as caffe_pb2      # module generated from caffe.proto

caffemodel_filename = 'yourfilename.caffemodel'     # change to your own .caffemodel file
result_file = 'yourfilename.prototxt'               # change to your desired output file name

# Deserialize the whole model protobuf.
model = caffe_pb2.NetParameter()
# FIX: context manager guarantees the file handle is closed even on parse errors.
with open(caffemodel_filename, 'rb') as f:
    model.ParseFromString(f.read())

layers = model.layer
# NOTE(review): very old caffemodels store layers in the deprecated
# `model.layers` (V1LayerParameter) field instead — if `layers` comes out
# empty, the model likely needs upgrading with caffe's upgrade_net tool.

# FIX: open the output once with 'w' (was 'a+' per layer inside the loop, so
# every rerun appended a duplicate copy of the whole network).
with open(result_file, 'w') as fd:
    for layer in layers:  # iterate over every layer in the network
        res = []

        # name / type
        res.append('layer {')
        res.append('  name: "%s"' % layer.name)
        res.append('  type: "%s"' % layer.type)

        # bottom / top blobs
        for bottom in layer.bottom:
            res.append('  bottom: "%s"' % bottom)
        for top in layer.top:
            res.append('  top: "%s"' % top)

        # loss_weight is a repeated float — FIX: no quotes (quoted numbers are
        # invalid prototxt).
        for loss_weight in layer.loss_weight:
            res.append('  loss_weight: %s' % loss_weight)

        # param (per-blob lr/decay multipliers)
        # FIX: proto2 scalar fields are never None — the old `is not None`
        # checks were always true. Use HasField to test explicit presence.
        for param in layer.param:
            param_res = []
            if param.HasField('lr_mult'):
                param_res.append('    lr_mult: %s' % param.lr_mult)
            if param.decay_mult != 1:  # 1 is the proto default; only emit overrides
                param_res.append('    decay_mult: %s' % param.decay_mult)
            if param_res:
                res.append('  param {')
                res.extend(param_res)
                res.append('  }')

        # lrn_param — emit only the fields that differ from their proto defaults
        if layer.HasField('lrn_param'):
            lrn_res = []
            lrn = layer.lrn_param
            if lrn.local_size != 5:     # 5 is the proto default
                lrn_res.append('    local_size: %d' % lrn.local_size)
            if lrn.alpha != 1:
                lrn_res.append('    alpha: %f' % lrn.alpha)
            if lrn.beta != 0.75:
                lrn_res.append('    beta: %f' % lrn.beta)
            NormRegionMapper = {0: 'ACROSS_CHANNELS', 1: 'WITHIN_CHANNEL'}
            if lrn.norm_region != 0:
                lrn_res.append('    norm_region: %s' % NormRegionMapper[lrn.norm_region])
            EngineMapper = {0: 'DEFAULT', 1: 'CAFFE', 2: 'CUDNN'}
            if lrn.engine != 0:
                lrn_res.append('    engine: %s' % EngineMapper[lrn.engine])
            if lrn_res:
                res.append('  lrn_param {')
                res.extend(lrn_res)
                res.append('  }')

        # include (phase restriction)
        if len(layer.include) > 0:
            include_res = []
            phase_mapper = {0: 'TRAIN', 1: 'TEST'}
            for include in layer.include:
                if include.HasField('phase'):
                    include_res.append('    phase: %s' % phase_mapper[include.phase])
            if include_res:
                res.append('  include {')
                res.extend(include_res)
                res.append('  }')

        # transform_param
        if layer.HasField('transform_param'):
            transform_param_res = []
            tp = layer.transform_param
            if tp.scale != 1:
                transform_param_res.append('    scale: %s' % tp.scale)
            # FIX: was `'    mirror: ' + tp.mirror` — str + bool raises
            # TypeError; prototxt booleans are lowercase true/false.
            if tp.mirror:
                transform_param_res.append('    mirror: true')
            if transform_param_res:
                res.append('  transform_param {')
                res.extend(transform_param_res)
                res.append('  }')

        # data_param
        dp = layer.data_param
        if layer.HasField('data_param') and (dp.source != "" or dp.batch_size != 0 or dp.backend != 0):
            data_param_res = []
            if dp.source != "":
                data_param_res.append('    source: "%s"' % dp.source)
            if dp.batch_size != 0:
                data_param_res.append('    batch_size: %d' % dp.batch_size)
            # FIX: backend is an enum — emit the symbolic name, not the raw int.
            BackendMapper = {0: 'LEVELDB', 1: 'LMDB'}
            if dp.backend != 0:
                data_param_res.append('    backend: %s' % BackendMapper[dp.backend])
            if data_param_res:
                # FIX: was 'data_param: {' — the colon before '{' is invalid prototxt.
                res.append('  data_param {')
                res.extend(data_param_res)
                res.append('  }')

        # convolution_param
        if layer.HasField('convolution_param'):
            convolution_param_res = []
            conv_param = layer.convolution_param
            if conv_param.num_output != 0:
                convolution_param_res.append('    num_output: %d' % conv_param.num_output)
            for kernel_size in conv_param.kernel_size:
                convolution_param_res.append('    kernel_size: %d' % kernel_size)
            for pad in conv_param.pad:
                convolution_param_res.append('    pad: %d' % pad)
            for stride in conv_param.stride:
                convolution_param_res.append('    stride: %d' % stride)
            # 'constant' is the filler default, so only non-default fillers are emitted
            if conv_param.weight_filler.type != 'constant':
                convolution_param_res.append('    weight_filler {')
                convolution_param_res.append('      type: "%s"' % conv_param.weight_filler.type)
                convolution_param_res.append('    }')
            if conv_param.bias_filler.type != 'constant':
                convolution_param_res.append('    bias_filler {')
                convolution_param_res.append('      type: "%s"' % conv_param.bias_filler.type)
                convolution_param_res.append('    }')
            if convolution_param_res:
                res.append('  convolution_param {')
                res.extend(convolution_param_res)
                res.append('  }')

        # pooling_param
        if layer.HasField('pooling_param'):
            pooling_param_res = []
            pp = layer.pooling_param
            if pp.kernel_size > 0:
                pooling_param_res.append('    kernel_size: %d' % pp.kernel_size)
                pooling_param_res.append('    stride: %d' % pp.stride)
                pooling_param_res.append('    pad: %d' % pp.pad)
                PoolMethodMapper = {0: 'MAX', 1: 'AVE', 2: 'STOCHASTIC'}
                pooling_param_res.append('    pool: %s' % PoolMethodMapper[pp.pool])
            if pooling_param_res:
                res.append('  pooling_param {')
                res.extend(pooling_param_res)
                res.append('  }')

        # inner_product_param
        if layer.HasField('inner_product_param'):
            inner_product_param_res = []
            if layer.inner_product_param.num_output != 0:
                inner_product_param_res.append('    num_output: %d' % layer.inner_product_param.num_output)
            if inner_product_param_res:
                res.append('  inner_product_param {')
                res.extend(inner_product_param_res)
                res.append('  }')

        # dropout_param
        if layer.HasField('dropout_param'):
            dropout_param_res = []
            if layer.dropout_param.dropout_ratio != 0.5:  # 0.5 is the proto default
                dropout_param_res.append('    dropout_ratio: %f' % layer.dropout_param.dropout_ratio)
            if dropout_param_res:
                res.append('  dropout_param {')
                res.extend(dropout_param_res)
                res.append('  }')

        res.append('}')

        # One write per layer; join avoids the stray leading newline the old
        # per-line '\n'+line writes produced at the top of the file.
        fd.write('\n'.join(res) + '\n')
 

最后贴出一个好用的prototxt在线可视化网页
https://lutzroeder.github.io/netron/
该工具开源地址:https://github.com/lutzroeder/netron
主要是写下来不重蹈覆辙,也希望对大家有绵薄的帮助,祝大家前程似锦,事业有成!!!

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值