Hello everyone in the community.
As mentioned in the title, my hardware setup is a Raspberry Pi 5 with 16GB RAM and an AI HAT+ board with a Hailo-8 (26 TOPS), running Raspberry Pi OS 64-bit. I've been using the YOLO framework for a while, and I'm now starting to work with the Hailo.
My challenge is finding a way to run an existing YOLO (version 8 or 11) script while offloading the inference to the Hailo-8. I understand that the chip only accepts models compiled to the ".hef" format. In addition, I can't install the Hailo Dataflow Compiler Python package (whl), because I currently don't have access to a machine that meets its requirements.
That said, I know there are pre-compiled .hef models converted from the standard YOLO .pt weights. Based on the example code below, could someone guide me on how to write a similar Python script, with the only difference being that the Hailo-8 runs the inference on the frames captured by my USB camera?
I currently don’t have any Raspberry Pi cameras available…
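From skimming the HailoRT Python examples, my best (untested) guess is that raw inference on a .hef looks roughly like the sketch below. The file name "yolov8n.hef", the PCIe interface and the UINT8/FLOAT32 format arguments are just my assumptions taken from those examples, so please correct me if this is the wrong approach:

# Untested sketch based on the HailoRT (hailo_platform) Python examples.
# "yolov8n.hef" is a placeholder for whichever pre-compiled model I end up using.
import numpy as np
from hailo_platform import (HEF, VDevice, HailoStreamInterface, ConfigureParams,
                            InputVStreamParams, OutputVStreamParams,
                            InferVStreams, FormatType)

hef = HEF("yolov8n.hef")

with VDevice() as device:
    # Load the network group stored in the HEF onto the Hailo-8 over PCIe
    configure_params = ConfigureParams.create_from_hef(hef, interface=HailoStreamInterface.PCIe)
    network_group = device.configure(hef, configure_params)[0]
    network_group_params = network_group.create_params()

    # I'm not sure these format types are right for a compiled YOLO model
    input_params = InputVStreamParams.make(network_group, format_type=FormatType.UINT8)
    output_params = OutputVStreamParams.make(network_group, format_type=FormatType.FLOAT32)

    input_info = hef.get_input_vstream_infos()[0]
    height, width, channels = input_info.shape  # e.g. 640x640x3 for a YOLOv8n HEF

    with InferVStreams(network_group, input_params, output_params) as pipeline:
        with network_group.activate(network_group_params):
            # Dummy frame just to check that the pipeline runs at all
            frame = np.zeros((1, height, width, channels), dtype=np.uint8)
            results = pipeline.infer({input_info.name: frame})
            print(list(results.keys()))

What I really don't know is how to turn whatever infer() returns into boxes, classes and scores so I can keep the rest of my logic, which is why I'm posting the full script below.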
My current example code (Ultralytics, not yet using the Hailo):
from ultralytics import YOLO
import cv2
import math
import pygame
import threading
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320) #3, 640 best160
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240) #4, 480 best120
#cap.set(cv2.CAP_PROP_FPS, 60) #
frame_count = 0
process_interval = 10
# Model YOLO
model = YOLO("yolo11n.pt") #yolov5nu_ncnn_model
pygame.init()
pygame.mixer.init()
alarme_som = pygame.mixer.Sound("alarm.wav")
alarmeCtl = False
# Setup GPIO
out1 = 23
out2 = 24
#h = lgpio.gpiochip_open(0)
#lgpio.gpio_claim_output(h, out1)
#lgpio.gpio_claim_output(h, out2)
#def control_sig(on):
#    if on:
#        lgpio.gpio_write(h, out1, 0)
#        lgpio.gpio_write(h, out2, 1)
#        time.sleep(1)
#    else:
#        lgpio.gpio_write(h, out1, 1)
#        lgpio.gpio_write(h, out2, 0)
def alarme():
    global alarmeCtl
    alarme_som.play()
    pygame.time.wait(250)  # let the sound play before re-arming
    alarmeCtl = False
area = [200, 25, 320, 300]  # monitored region: x1, y1, x2, y2
def check_overlap(box, area):
    x1, y1, x2, y2 = area      # monitored region corners
    bx1, by1, bx2, by2 = box   # bounding-box corners
    return not (bx1 > x2 or bx2 < x1 or by1 > y2 or by2 < y1)
classNames = ["person"]
while True:
    success, img = cap.read()
    if not success:
        print("Error reading frame from camera")
        break

    img_resized = cv2.resize(img, (640, 480))
    mask = img_resized.copy()
    cv2.rectangle(mask, (area[0], area[1]), (area[2], area[3]), (0, 255, 0), -1)

    # Only run detection every N frames to keep the loop responsive
    if frame_count % process_interval == 0:
        results = model(img_resized, classes=[0])  # class 0 = person
        detect_person = False
        for r in results:
            for box in r.boxes:
                # Bounding box in the resized frame's coordinate system
                x1, y1, x2, y2 = box.xyxy[0]
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                cls = int(box.cls[0])
                if cls == 0:
                    cv2.rectangle(img_resized, (x1, y1), (x2, y2), (255, 0, 0), 2)
                    if check_overlap((x1, y1, x2, y2), area):
                        cv2.rectangle(mask, (area[0], area[1]), (area[2], area[3]), (0, 0, 255), -1)
                        if not alarmeCtl:
                            alarmeCtl = True
                            threading.Thread(target=alarme).start()
                        print("Someone entered the area!")
                        detect_person = True
        # if not detect_person:
        #     control_sig(False)

    frame_count += 1
    imgFinal = cv2.addWeighted(mask, 0.5, img_resized, 0.5, 0)
    cv2.imshow('Webcam', imgFinal)
    if cv2.waitKey(1) == ord('q'):
        # GPIO setup above is commented out, so these writes are disabled too
        # lgpio.gpio_write(h, out1, 0)
        # lgpio.gpio_write(h, out2, 0)
        break

cap.release()
cv2.destroyAllWindows()
pygame.quit()
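And this is roughly how I imagine the capture loop changing if the script used the Hailo instead of Ultralytics. It is untested and reuses pipeline, input_info, height and width from the sketch above (so it would have to live inside those with blocks); post-processing the raw output is exactly the part I don't know how to do:

# Untested guess at feeding USB-camera frames to the Hailo pipeline sketched
# earlier; it assumes the compiled model expects RGB uint8 at its native size.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    if not success:
        break

    # Convert and resize the webcam frame to the model's expected input
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (width, height))
    batch = np.expand_dims(resized, axis=0)

    raw_out = pipeline.infer({input_info.name: batch})
    # TODO: this is where I'm stuck -- turning raw_out into boxes and scores
    # so I can keep the same check_overlap()/alarm logic as above.

    cv2.imshow('Webcam', img)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Any pointers on whether this is the right direction, or whether I should instead use one of the ready-made Hailo examples for the Raspberry Pi, would be very welcome.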