Hello!! I am trying to run 2 pipelines, the depth_pipeline as well as detection_pipeline at the same time to detect closest objects.
I have a custom script to do this, but I am facing an error. I have cloned the hailo-rpi5-examples and set up all the dependencies such as hailort also.
This is my code:
import gi
gi.require_version(‘Gst’, ‘1.0’)
from gi.repository import Gst
import os
import setproctitle
from hailo_apps_infra.hailo_rpi_common import (
detect_hailo_arch,
get_default_parser,
get_caps_from_pad,
get_numpy_from_buffer,
app_callback_class,
)
from hailo_apps_infra.gstreamer_app import GStreamerApp
import numpy as np
import cv2
import hailo
Gst.init(None)
Global dictionaries to store outputs with timestamps
yolo_detections = {}
depth_maps = {}
def yolo_callback(pad, info, user_data):
“”“Callback for YOLO branch to extract detections and store with timestamp.”“”
buffer = info.get_buffer()
if buffer is None:
return Gst.PadProbeReturn.OK
timestamp = buffer.pts
roi = hailo.get_roi_from_buffer(buffer)
detections = roi.get_objects_typed(hailo.HAILO_DETECTION)
yolo_detections[timestamp] = detections
if timestamp in depth_maps:
combine_and_display(timestamp)
return Gst.PadProbeReturn.OK
def depth_callback(pad, info, user_data):
“”“Callback for depth branch to extract depth map and store with timestamp.”“”
buffer = info.get_buffer()
if buffer is None:
return Gst.PadProbeReturn.OK
timestamp = buffer.pts
format, width, height = get_caps_from_pad(pad)
depth_map = get_numpy_from_buffer(buffer, format, width, height)
depth_maps[timestamp] = depth_map
if timestamp in yolo_detections:
combine_and_display(timestamp)
return Gst.PadProbeReturn.OK
def combine_and_display(timestamp):
“”“Combine YOLO detections and depth map for the same timestamp and display.”“”
if timestamp in yolo_detections and timestamp in depth_maps:
detections = yolo_detections.pop(timestamp)
depth_map = depth_maps.pop(timestamp)
# Normalize depth map if it's float32
if depth_map.dtype == np.float32:
depth_map = (depth_map - np.min(depth_map)) / (np.max(depth_map) - np.min(depth_map)) * 255
depth_map = depth_map.astype(np.uint8)
# Convert to BGR for display if grayscale
if len(depth_map.shape) == 2:
depth_map_bgr = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2BGR)
else:
depth_map_bgr = depth_map[:, :, ::-1] # Convert RGB to BGR if needed
# Overlay YOLO bounding boxes
for det in detections:
bbox = det.get_bbox()
label = det.get_label()
conf = det.get_confidence()
# Scale bbox coordinates to depth map dimensions
x1, y1 = int(bbox.xmin() * depth_map_bgr.shape[1]), int(bbox.ymin() * depth_map_bgr.shape[0])
x2, y2 = int(bbox.xmax() * depth_map_bgr.shape[1]), int(bbox.ymax() * depth_map_bgr.shape[0])
cv2.rectangle(depth_map_bgr, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(depth_map_bgr, f"{label} {conf:.2f}", (x1, y1 - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
# Display the combined output
cv2.imshow("Combined Output", depth_map_bgr)
if cv2.waitKey(1) & 0xFF == ord('q'):
return Gst.PadProbeReturn.DROP
class CombinedApp(GStreamerApp):
def init(self, app_callback, user_data, parser=None):
if parser is None:
parser = get_default_parser()
super().__init__(parser, user_data)
self.arch = self.options_menu.arch or detect_hailo_arch()
if self.arch is None:
raise RuntimeError("Hailo architecture could not be auto-detected.")
self.app_callback = app_callback
setproctitle.setproctitle("Hailo Combined App")
# Define paths to HEF files and post-processing libraries
self.yolo_hef = "/home/nandy/hailo-rpi5-examples/resources/yolov8m.hef"
self.yolo_post_so = "/home/nandy/hailo-rpi5-examples/resources/libyolo_hailortpp_postprocess.so"
self.depth_hef = "/home/nandy/hailo-rpi5-examples/resources/scdepthv3.hef"
self.depth_post_so = "/home/nandy/hailo-rpi5-examples/resources/libdepth_postprocess.so"
# Video source (update to libcamerasrc for camera)
self.video_source = "filesrc location=/home/nandy/hailo-rpi5-examples/resources/example.mp4 ! decodebin ! videoconvert"
self.create_pipeline()
def get_pipeline_string(self):
return f"""
{self.video_source} ! tee name=t
t. ! queue max-size-buffers=5 ! videoconvert n-threads=3 ! videoscale ! video/x-raw,format=RGB,width=640,height=640 ! \
hailonet hef-path={self.yolo_hef} multi-process-service=true device-count=1 vdevice-group-id=1 batch-size=1 output-format-type=HAILO_FORMAT_TYPE_FLOAT32 ! \
hailofilter so-path={self.yolo_post_so} function-name=filter_letterbox ! queue max-size-buffers=5 ! identity name=yolo_callback ! hailomuxer.sink_0
t. ! queue max-size-buffers=5 ! videoconvert n-threads=3 ! videoscale ! video/x-raw,format=RGB,width=640,height=480 ! \ # Adjust for SC-DepthV3 if needed
hailonet hef-path={self.depth_hef} multi-process-service=true device-count=1 vdevice-group-id=1 batch-size=1 output-format-type=HAILO_FORMAT_TYPE_FLOAT32 ! \
hailofilter so-path={self.depth_post_so} function-name=filter_scdepth ! queue max-size-buffers=5 ! identity name=depth_callback ! hailomuxer.sink_1
hailomuxer name=hailomuxer ! fpsdisplaysink video-sink=fakesink sync=false
"""
if name == “main”:
user_data = app_callback_class()
app = CombinedApp(None, user_data)
pipeline = app.pipeline
# Add probes to yolo_callback and depth_callback pads
yolo_pad = pipeline.get_by_name("yolo_callback").get_static_pad("sink")
yolo_pad.add_probe(Gst.PadProbeType.BUFFER, yolo_callback, None)
depth_pad = pipeline.get_by_name("depth_callback").get_static_pad("sink")
depth_pad.add_probe(Gst.PadProbeType.BUFFER, depth_callback, None)
try:
app.run()
finally:
cv2.destroyAllWindows() # Ensure OpenCV windows are closed on exit
The error I am facing is
(venv_hailo_rpi5_examples) nandy@raspberrypi:~/hailo-rpi5-examples $ python basic_pipelines/custom.py
(python:4360): GStreamer-WARNING **: 12:13:34.002: Trying to link elements t and queue2 that don’t share a common ancestor: queue2 hasn’t been added to a bin or pipeline, and t is in pipeline0
Error creating pipeline: gst_parse_error: syntax error (0)
Please help me out.