import copy
from collections import OrderedDict

import numpy as np
import onnx
import onnxruntime as rt


def get_layer_output(model, image):
    """Run the model once and return every intermediate tensor, keyed by tensor name."""
    # Keep the original graph outputs around in case the model proto needs to be restored later.
    ori_output = copy.deepcopy(model.graph.output)
    # Promote every node output to a graph output so onnxruntime will return it.
    existing = {o.name for o in model.graph.output}
    for node in model.graph.node:
        for output in node.output:
            if output and output not in existing:
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
                existing.add(output)
    ort_session = rt.InferenceSession(model.SerializeToString(), providers=["CPUExecutionProvider"])
    # Feed the same image tensor to every model input.
    ort_inputs = {input_ele.name: image for input_ele in ort_session.get_inputs()}
    output_names = [x.name for x in ort_session.get_outputs()]
    ort_outs = ort_session.run(output_names, ort_inputs)
    return OrderedDict(zip(output_names, ort_outs))
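
# --- Optional demo (my addition, not from the original post): build a throwaway
# two-node model with onnx.helper just to show what get_layer_output exposes.
# All names below (X, Y, relu_out, Relu_0, Neg_1) are made up for illustration. ---
from onnx import TensorProto, helper

def _demo_tiny_model():
    # Relu feeding Neg; "relu_out" is an intermediate tensor a normal run never returns.
    X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 4])
    Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 4])
    relu = helper.make_node("Relu", ["X"], ["relu_out"], name="Relu_0")
    neg = helper.make_node("Neg", ["relu_out"], ["Y"], name="Neg_1")
    graph = helper.make_graph([relu, neg], "demo_graph", [X], [Y])
    demo = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
    demo.ir_version = 8  # keep the IR version modest so older onnxruntime builds can load it
    return demo

demo_outs = get_layer_output(_demo_tiny_model(), np.array([[-1.0, 2.0, -3.0, 4.0]], dtype=np.float32))
print(demo_outs["relu_out"])  # the hidden Relu activation: [[0. 2. 0. 4.]]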

model_path = "/data_alpha/test_1221/MobileSAM-pytorch/exp/adamw_lr_1e-3_wd_5e-4_bs_8_epoch_16/ckpt/tiny_2262.onnx"
onnx_model = onnx.load(model_path)

# tensor_input_2 is the preprocessed input tensor prepared earlier in the post.
layer_outputs = get_layer_output(onnx_model, tensor_input_2)

# Walk the graph node by node so each node name is paired with its own output tensors
# (zipping node names directly against the run results would misalign them, because
# the original graph outputs come first in the returned list).
for node in onnx_model.graph.node:
    if "Conv_" not in node.name:
        continue
    for output_name in node.output:
        output_data = layer_outputs[output_name]
        # e.g. dump each Conv output to a text file:
        # np.savetxt(f"{output_folder}/{node.name}.txt", output_data.flatten().astype(np.float32), fmt='%f')
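
# Optional sanity check (my addition, not in the original post): reload the
# untouched model from disk and confirm that exposing the intermediate tensors
# did not change the model's final outputs.
ref_sess = rt.InferenceSession(onnx.load(model_path).SerializeToString(),
                               providers=["CPUExecutionProvider"])
ref_inputs = {inp.name: tensor_input_2 for inp in ref_sess.get_inputs()}
ref_names = [out.name for out in ref_sess.get_outputs()]
for name, ref in zip(ref_names, ref_sess.run(ref_names, ref_inputs)):
    assert np.allclose(layer_outputs[name], ref, atol=1e-5), f"final output {name} changed"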
From: https://www.cnblogs.com/WEIWEI1095/p/17981736