Background
- Sometimes we need to compare and analyze the intermediate-layer results produced during model inference, so a quick and convenient way to dump the outputs of specified intermediate layers becomes important.
- The approach is as follows:
- Core idea: before running inference, add the specified intermediate layers (or all layers) to the model's graph outputs. A minimal single-layer sketch follows this list; the full script in the Code section registers every layer.
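If only one specific layer is needed rather than every layer, the same trick works with a single name. A minimal sketch, assuming the intermediate tensor is called 'conv1_output' (a hypothetical name; take the real one from model.graph.node or a model viewer such as Netron):

import onnx
import onnxruntime as rt

model = onnx.load('./model.onnx')
# 'conv1_output' is a placeholder for the actual intermediate tensor name in your graph.
model.graph.output.extend([onnx.ValueInfoProto(name='conv1_output')])
session = rt.InferenceSession(model.SerializeToString())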
Code
import onnx
import onnxruntime as rt
import torch
import numpy as np
from collections import OrderedDict

np.set_printoptions(threshold=np.inf)

def to_numpy(tensor):
    # Detach from the autograd graph if necessary, then move to CPU as a NumPy array.
    return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

def read_file(N, C, H, W, bin_path):
    # Read a raw uint8 binary file, normalize to [0, 1], and reshape to NCHW.
    input_size = N * C * H * W
    with open(bin_path, "rb") as f:
        data = np.fromfile(f, dtype=np.uint8)
    data = data.astype(np.float32) / 255
    data_tensor = torch.from_numpy(data[0:input_size])
    return data_tensor.reshape(N, C, H, W)

model = onnx.load('./model.onnx')

# Register every node's output tensors as graph outputs so onnxruntime returns them all.
for node in model.graph.node:
    for output in node.output:
        model.graph.output.extend([onnx.ValueInfoProto(name=output)])
        print('node is: ', onnx.ValueInfoProto(name=output))
print(model.graph.output)

# Build the inference session from the modified in-memory model (no need to save it to disk).
session = rt.InferenceSession(model.SerializeToString())

x1 = read_file(1, 1, 128, 128, "./input1.bin")
ort_inputs = {session.get_inputs()[0].name: to_numpy(x1)}
ort_out = session.run(None, ort_inputs)
print(ort_out)

# Pair each output name with its tensor so intermediate results can be looked up by name.
outputs = [x.name for x in session.get_outputs()]
ort_out = OrderedDict(zip(outputs, ort_out))
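With the outputs zipped into an OrderedDict, any intermediate result can be pulled out by name and written to disk for comparison. A small usage sketch, where 'conv1_output' is again a hypothetical tensor name standing in for whichever layer you want to inspect:

# Look up one intermediate tensor by name and dump it for later comparison.
# 'conv1_output' is a placeholder; use a real name from outputs / model.graph.node.
feat = ort_out['conv1_output']
np.save('./conv1_output.npy', feat)                   # binary .npy keeps dtype and shape
np.savetxt('./conv1_output.txt', feat.reshape(-1))    # flattened text dump, easy to diff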