I encountered an error while inferring with a custom .hef file:

Assertion failed: 'Condition "input.size() == 1" is not met: input.size() is 3'. dg_postprocess_detection.cpp: 1849 [DG::DetectionPostprocessYoloHailo::forward]
(hailo_virtualenv) hailo@LAPTOP-JILEML96:/local/workspace/hailos/Assets$ hailo parse-hef ./models//mats_v11n_05/mats_v11n_05.hef
[info] No GPU chosen and no suitable GPU found, falling back to CPU.
[info] Current Time: 10:18:06, 11/21/25
[info] CPU: Architecture: x86_64, Model: AMD Ryzen 7 5800H with Radeon Graphics, Number Of Cores: 16, Utilization: 0.6%
[info] Memory: Total: 14GB, Available: 12GB
[info] System info: OS: Linux, Kernel: 6.6.87.2-microsoft-standard-WSL2
[info] Hailo DFC Version: 3.33.0
[info] HailoRT Version: 4.23.0
[info] PCIe: No Hailo PCIe device was found
[info] Running hailo parse-hef ./models//mats_v11n_05/mats_v11n_05.hef
(hailo) Running command 'parse-hef' with 'hailortcli'
Architecture HEF was compiled for: HAILO8L
Network group name: mats_v11n_05, Multi Context - Number of contexts: 4
Network name: mats_v11n_05/mats_v11n_05
VStream infos:
Input mats_v11n_05/input_layer1 UINT8, NHWC(640x640x3)
Output mats_v11n_05/conv51 UINT8, NHWC(80x80x3)
Output mats_v11n_05/conv59 UINT8, NHWC(40x40x3)
Output mats_v11n_05/conv71 UINT8, FCR(20x20x3)
# General imports used throughout the tutorial.
# (This line was pasted without the leading '#', which is a syntax error.)
import os

import cv2
import numpy as np
import tensorflow as tf
from IPython.display import SVG
from PIL import Image

from hailo_sdk_client import ClientRunner, InferenceContext
# --- Tutorial configuration -------------------------------------------------
# NOTE: the original paste used curly quotes (“ ”), which are a Python syntax
# error; they are replaced with straight quotes here.
input_size = 640  # Model input spatial size (height == width).
bath_size = 1     # Batch size. NOTE(review): typo for "batch_size"; kept
                  # as-is because later cells reference this exact name.
chosen_hw_arch = "hailo8l"  # Target Hailo hardware architecture (Hailo-8L).
onnx_model_name = "mats_v11n_05"  # Model name (also the directory name).
onnx_path = f"./models/{onnx_model_name}/{onnx_model_name}.onnx"  # Source ONNX model.
hailo_model_har_path = f"./models/{onnx_model_name}/{onnx_model_name}_hailo_model.har"  # Parsed HAR output.
hailo_quantized_har_path = f"./models/{onnx_model_name}/{onnx_model_name}_hailo_quantized_model.har"  # Quantized HAR output.
hailo_model_hef_path = f"./models/{onnx_model_name}/{onnx_model_name}.hef"  # Compiled HEF output.
images_path = f"./data/{onnx_model_name}/images"  # Calibration image directory.
# Convert the ONNX model to HAR (Hailo Archive).
print("----------------将 onnx 模型转为 har----------------")
runner = ClientRunner(hw_arch=chosen_hw_arch)
# NOTE(review): choosing three end nodes yields a HEF with three output
# tensors; the Hailo YOLO postprocessor then fails with
# "input.size() == 1 is not met: input.size() is 3" unless the model script
# adds the matching NMS/postprocess configuration — TODO confirm against the
# Hailo Model Zoo .alls for YOLOv11.
hn, npz = runner.translate_onnx_model(
    model=onnx_path,
    net_name=onnx_model_name,
    start_node_names=["/model.0/conv/Conv"],
    end_node_names=[
        "/model.23/cv3.0/cv3.0.2/Conv",
        "/model.23/cv3.1/cv3.1.2/Conv",
        "/model.23/cv3.2/cv3.2.2/Conv",
    ],
    # ONNX input is NCHW: [batch, channels, height, width].
    net_input_shapes={"/model.0/conv/Conv": [bath_size, 3, input_size, input_size]},
)
runner.save_har(hailo_model_har_path)
# Prepare the calibration dataset.
print("----------------校准数据集准备----------------")
# BUG FIX: the original list was [".jpg", ".png", "bmp"] — "bmp" is missing
# its dot, so .bmp files were silently skipped by the extension filter.
valid_exts = (".jpg", ".png", ".bmp")
images_list = [
    img_name
    for img_name in os.listdir(images_path)
    if os.path.splitext(img_name)[1].lower() in valid_exts
][:1500]  # cap at 1500 calibration images
# float64 (np.zeros default); presumably the Hailo optimizer normalizes the
# dtype itself — TODO confirm against the DFC calibration docs.
calib_dataset = np.zeros((len(images_list), input_size, input_size, 3))
for idx, img_name in enumerate(sorted(images_list)):
    # convert("RGB") guards against grayscale/RGBA images, which would not
    # broadcast into the (H, W, 3) slot.
    img = Image.open(os.path.join(images_path, img_name)).convert("RGB")
    resized_image = np.array(img.resize((input_size, input_size), Image.Resampling.BILINEAR))
    calib_dataset[idx, :, :, :] = resized_image
# Quantize (optimize) the parsed model using the calibration dataset.
print("----------------量化模型----------------")
runner = ClientRunner(har=hailo_model_har_path)
# Model-script commands; higher compression_level trades accuracy for size.
alls_lines = [
    "model_optimization_flavor(optimization_level=1, compression_level=0)",
]
runner.load_model_script("\n".join(alls_lines))
runner.optimize(calib_dataset)
runner.save_har(hailo_quantized_har_path)
# Compile the quantized HAR into a deployable HEF binary.
print("----------------编译为 hef----------------")
runner = ClientRunner(har=hailo_quantized_har_path)
compiled_hef = runner.compile()
# Write the binary blob via a context manager so the handle is always closed.
with open(hailo_model_hef_path, "wb") as f:
    f.write(compiled_hef)