Hi everyone,
I’m looking for a stereo depth model that can run in real time (100–200 ms per frame) on a Raspberry Pi 5 with a Hailo-8 accelerator.
I tried the stereo model from the hailo_model_zoo, but inference takes about 800 ms, which is too slow for my use case.
So I started testing other stereo depth models like FastACVNet, CREStereo, and HitNet using the following conversion code:
import os

import numpy as np
import onnx
from onnxsim import simplify
from PIL import Image

from hailo_sdk_client import ClientRunner


def parse_stereo_onnx(onnx_path: str, left_dir: str, right_dir: str, net_name: str, hw_arch: str, target_size=(640, 480)):
    # -------------------------------
    # Step 1. Simplify the ONNX model
    # -------------------------------
    model = onnx.load(onnx_path)
    model_simp, check = simplify(model)
    if not check:
        raise RuntimeError("Simplified ONNX model validation failed.")
    simplified_onnx_path = os.path.splitext(onnx_path)[0] + "_simplified.onnx"
    onnx.save(model_simp, simplified_onnx_path)
    print(f"[✓] Simplified ONNX model saved to: {simplified_onnx_path}")

    # -----------------------------------------------------
    # Step 2. Translate the simplified ONNX model to Hailo format
    # -----------------------------------------------------
    runner = ClientRunner(hw_arch=hw_arch)
    # Specify the input and output names, matching the YAML
    input_names = ["left_image", "right_image"]
    output_names = ["output"]
    hn, params = runner.translate_onnx_model(simplified_onnx_path, net_name,
                                             input_names=input_names,
                                             outputs_names=output_names)
    print("[✓] Model translation to Hailo format completed.")

    # -----------------------------------------------------
    # Step 3. Load stereo calibration dataset
    # -----------------------------------------------------
    calib_dataset = load_stereo_calib_dataset(left_dir, right_dir, target_size)
    print("[✓] Calibration dataset prepared.")

    # -----------------------------------------------------
    # Step 4. Optimize the model (quantization)
    # -----------------------------------------------------
    runner.optimize(calib_dataset)
    print("[✓] Model quantization complete.")

    # -----------------------------------------------------
    # Step 5. Save HAR file
    # -----------------------------------------------------
    har_file = f"{net_name}_quantized.har"
    runner.save_har(har_file)
    print(f"[✓] HAR file saved to: {har_file}")


def load_stereo_calib_dataset(left_dir, right_dir, target_size=(640, 480)):
    """
    Load and resize the left/right images for calibration.
    Returns a dict: { "left_image": [...], "right_image": [...] }
    """
    left_imgs = sorted([f for f in os.listdir(left_dir) if f.lower().endswith(('.jpg', '.png'))])
    right_imgs = sorted([f for f in os.listdir(right_dir) if f.lower().endswith(('.jpg', '.png'))])
    assert len(left_imgs) == len(right_imgs), "Different number of left/right images"

    images_left = []
    images_right = []
    for l_img, r_img in zip(left_imgs, right_imgs):
        l_path = os.path.join(left_dir, l_img)
        r_path = os.path.join(right_dir, r_img)
        left = Image.open(l_path).convert("RGB").resize(target_size, Image.BILINEAR)
        right = Image.open(r_path).convert("RGB").resize(target_size, Image.BILINEAR)
        images_left.append(np.array(left, dtype=np.float32))
        images_right.append(np.array(right, dtype=np.float32))

    return {
        "left_image": np.stack(images_left),
        "right_image": np.stack(images_right),
    }


def main():
    onnx_path = "./model/crestereo_combined_iter10_480x640.onnx"
    calib_left = "./calibration/left"
    calib_right = "./calibration/right"
    net_name = "stereo_depth_net"
    hw_arch = "hailo8l"
    parse_stereo_onnx(onnx_path, calib_left, calib_right, net_name, hw_arch)


if __name__ == "__main__":
    main()
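Before the translation step, I also double-check that the input names and shapes in the simplified ONNX graph really are left_image / right_image at the resolution I expect, since I’m not sure my names match what the exporter produced. This is just a small inspection helper using plain onnx, nothing Hailo-specific (the path is whatever my script saved):

import onnx

def print_onnx_io(onnx_path: str):
    """Print the graph-level input/output names and shapes of an ONNX model."""
    model = onnx.load(onnx_path)
    for tensor in list(model.graph.input) + list(model.graph.output):
        dims = [d.dim_value if d.dim_value > 0 else d.dim_param
                for d in tensor.type.tensor_type.shape.dim]
        print(f"{tensor.name}: {dims}")

print_onnx_io("./model/crestereo_combined_iter10_480x640_simplified.onnx")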
When running this conversion, I run into issues like:
[warning] This model has non-default (reflective/edge) padding layers which are not supported currently, and were replaced with zero padding.
or
ValueError: axes don't match array
This seems to occur during model translation. Has anyone encountered and resolved this when working with stereo ONNX models on Hailo?
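I’m not sure yet whether the "axes don't match array" error comes from the parser itself or from the calibration data I feed into optimize, so as a first step I added a quick sanity check on the calibration dict (my own check, not something from the Hailo docs; I assume the arrays should be 4-D NHWC at the network resolution):

def check_calib_dataset(calib_dataset, expected_hw=(480, 640)):
    """Sanity-check that each calibration array is 4-D NHWC at the expected resolution."""
    for name, arr in calib_dataset.items():
        assert arr.ndim == 4, f"{name}: expected 4 dims (N, H, W, C), got shape {arr.shape}"
        n, h, w, c = arr.shape
        assert (h, w) == expected_hw, f"{name}: expected HxW {expected_hw}, got {(h, w)}"
        assert c == 3, f"{name}: expected 3 channels, got {c}"
        print(f"[✓] {name}: shape {arr.shape} looks OK")

check_calib_dataset(load_stereo_calib_dataset("./calibration/left", "./calibration/right"))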
I also tried running hailo tutorial to get more information, but I get:
[C 2025-06-11 10:26:16.209 ServerApp] Running as root is not recommended. Use --allow-root to bypass.
After this, the Jupyter server crashes. Is there a clean way to launch the tutorial environment inside Docker?
Any suggestions, tips, or shared experiences would be greatly appreciated!
Thanks in advance!