1. 依赖
- protobuf
- onnx模型
2. 探索
- 采用 protobuf 的方式。onnx.proto 是 ONNX 官方定义的序列化格式描述文件,利用它可以把二进制的 .onnx 模型解码成可读文本:
`protoc --decode=onnx.ModelProto onnx.proto < yourfile.onnx`
打印出来的结果:
ir_version: 4
producer_name: "pytorch"
producer_version: "1.1"
graph {
node {
input: "inputx"
input: "conv1.weight"
output: "14"
op_type: "Conv"
attribute {
name: "dilations"
ints: 1
ints: 1
type: INTS
}
attribute {
name: "group"
i: 1
type: INT
}
attribute {
name: "kernel_shape"
ints: 3
ints: 3
type: INTS
}
attribute {
name: "pads"
ints: 1
ints: 1
ints: 1
ints: 1
type: INTS
}
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
}
node {
input: "14"
input: "bn1.weight"
input: "bn1.bias"
input: "bn1.running_mean"
input: "bn1.running_var"
output: "15"
op_type: "BatchNormalization"
attribute {
name: "epsilon"
f: 1e-05
type: FLOAT
}
attribute {
name: "momentum"
f: 0.9
type: FLOAT
}
}
node {
input: "15"
input: "prelu.weight"
output: "16"
op_type: "PRelu"
}
node {
input: "16"
input: "conv2.weight"
output: "17"
op_type: "Conv"
attribute {
name: "dilations"
ints: 1
ints: 1
type: INTS
}
attribute {
name: "group"
i: 1
type: INT
}
attribute {
name: "kernel_shape"
ints: 3
ints: 3
type: INTS
}
attribute {
name: "pads"
ints: 1
ints: 1
ints: 1
ints: 1
type: INTS
}
attribute {
name: "strides"
ints: 2
ints: 2
type: INTS
}
}
node {
input: "17"
input: "bn2.weight"
input: "bn2.bias"
input: "bn2.running_mean"
input: "bn2.running_var"
output: "18"
op_type: "BatchNormalization"
attribute {
name: "epsilon"
f: 1e-05
type: FLOAT
}
attribute {
name: "momentum"
f: 0.9
type: FLOAT
}
}
node {
input: "18"
input: "prelu.weight"
output: "outputy"
op_type: "PRelu"
}
name: "torch-jit-export"
initializer {
dims: 64
data_type: 1
name: "bn1.bias"
raw_data: "\000\000\000..."  （原始权重数据较长,此处截断省略）
}
input {
name: "inputx"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 64
}
dim {
dim_value: 64
}
}
}
}
}
input {
name: "conv1.weight"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
dim {
dim_value: 3
}
dim {
dim_value: 3
}
dim {
dim_value: 3
}
}
}
}
}
input {
name: "bn1.weight"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn1.bias"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn1.running_mean"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn1.running_var"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "conv2.weight"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
dim {
dim_value: 64
}
dim {
dim_value: 3
}
dim {
dim_value: 3
}
}
}
}
}
input {
name: "bn2.weight"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn2.bias"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn2.running_mean"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "bn2.running_var"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
input {
name: "prelu.weight"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 64
}
}
}
}
}
output {
name: "outputy"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 64
}
dim {
dim_value: 16
}
dim {
dim_value: 16
}
}
}
}
}
}
opset_import {
version: 9
}
采用这种方式时,推荐先找一个只有几层网络的小型 onnx 模型来解码,这样可以清晰地看到 onnx 模型的内部组成与结构。
- 采用netron可视化
只需安装 netron 即可直接打开 .onnx 文件进行可视化,方便检查:各层的数据维度、权重数值、节点之间的连接关系等。