Input buffer size 0 is different than expected

When I run the script inside a venv that has tensorflow or keras installed, it throws the error below, but when I deactivate the venv and run the same script it completes with no issues.

(myenv) hub@aihub-s0002:~/ai-hub/ai $ python inferences/faceRecognition.py 
[HailoRT] [error] CHECK failed - Input buffer size 0 is different than expected 37632 for input 'arcface_r50/input_layer1'
[HailoRT] [error] CHECK_SUCCESS failed with status=HAILO_INVALID_OPERATION(6)
[HailoRT] [error] CHECK_SUCCESS failed with status=HAILO_INVALID_OPERATION(6)
2025-01-29 22:23:30,369 - ERROR - Error in face recognition: Invalid operation. See hailort.log for more information
Traceback (most recent call last):
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 3310, in run_async
    cpp_job = self._configured_infer_model.run_async(
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
hailo_platform.pyhailort._pyhailort.HailoRTStatusException: 6

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/hub/ai-hub/ai/inferences/faceRecognition.py", line 118, in <module>
    run_face_recognition() 
    ^^^^^^^^^^^^^^^^^^^^^^
  File "/home/hub/ai-hub/ai/inferences/faceRecognition.py", line 96, in run_face_recognition
    configured_infer_model.run([bindings], RECOGNITION_MODEL_CONFIG['timeout_ms'])
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 3260, in run
    with ExceptionWrapper():
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 3261, in run
    job = self.run_async(bindings)
          ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 3309, in run_async
    with ExceptionWrapper():
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 111, in __exit__
    self._raise_indicative_status_exception(value)
  File "/usr/lib/python3/dist-packages/hailo_platform/pyhailort/pyhailort.py", line 156, in _raise_indicative_status_exception
    raise self.create_exception_from_status(error_code) from libhailort_exception
hailo_platform.pyhailort.pyhailort.HailoRTInvalidOperationException: Invalid operation. See hailort.log for more information

Here is the code for reference:

import numpy as np
from hailo_platform import VDevice, HailoSchedulingAlgorithm, FormatType
import cv2
from typing import List, Tuple, Dict, Optional
import os
import logging

# Configuration dictionary for face recognition
RECOGNITION_MODEL_CONFIG = {
    'path': '/home/hub/ai-hub/ai/models/arcface_r50h8.hef',
    'batch_size': 2,
    'confidence_threshold': 30,
    'iou_threshold': 0.25,
    'timeout_ms': 1000,
    'input_size': (112, 112)
}

def setup_logging():
    """Configure logging for the application."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )

def load_and_preprocess_image(image_path: str, target_size: Tuple[int, int]) -> np.ndarray:
    """Load and preprocess image for inference.
    
    Args:
        image_path: Path to the image file
        target_size: Desired size for the image (width, height)
    
    Returns:
        Preprocessed image as numpy array
    """
    try:
        image = cv2.imread(image_path)
        if image is None:
            raise FileNotFoundError(f"Could not load image from {image_path}")
        return cv2.resize(image, target_size).astype(np.uint8)
    except Exception as e:
        logging.error(f"Error preprocessing image: {str(e)}")
        raise

def create_output_buffers(infer_model) -> Dict[str, np.ndarray]:
    """Create output buffers for the model outputs.
    
    Args:
        infer_model: The inference model
    
    Returns:
        Dictionary mapping output names to buffer arrays
    """
    return {
        output.name: np.zeros(output.shape, dtype=np.uint8)
        for output in infer_model.outputs
    }

def run_face_recognition(image_path: Optional[str] = None) -> Dict[str, np.ndarray]:
    """Run face recognition inference on the given image.
    
    Args:
        image_path: Path to image file to process
    
    Returns:
        Dictionary containing raw recognition results
    """
    setup_logging()
    image_path = image_path or '/home/hub/ai-hub/ai/images/class_studs.png'
    
    try:
        # Initialize device
        params = VDevice.create_params()
        params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN

        with VDevice(params) as vdevice:
            # Create and configure infer model
            infer_model = vdevice.create_infer_model(RECOGNITION_MODEL_CONFIG['path'])
            infer_model.set_batch_size(RECOGNITION_MODEL_CONFIG['batch_size'])

            with infer_model.configure() as configured_infer_model:
                bindings = configured_infer_model.create_bindings()

                # Load and preprocess image
                input_tensor = load_and_preprocess_image(
                    image_path, 
                    RECOGNITION_MODEL_CONFIG['input_size']
                )
                bindings.input().set_buffer(input_tensor)

                # Create output buffers and set bindings
                output_buffers = create_output_buffers(infer_model)
                for name, buffer in output_buffers.items():
                    bindings.output(name).set_buffer(buffer)

                # Run inference
                configured_infer_model.run([bindings], RECOGNITION_MODEL_CONFIG['timeout_ms'])

                # Get results
                results = {
                    name: bindings.output(name).get_buffer() 
                    for name in output_buffers
                }

                os.makedirs('results', exist_ok=True)

                with open('results/recognition_results.txt', 'w') as f:
                    f.write(str(results))

                logging.info("Recognition results saved to results/recognition_results.txt")
                return results

    except Exception as e:
        logging.error(f"Error in face recognition: {str(e)}")
        raise

if __name__ == "__main__":
    run_face_recognition() 

Hey @hyperwolf,

Since your code works outside the virtual environment but not inside it, we’re most likely dealing with a dependency conflict in the venv - for example, tensorflow or keras pulling in a different NumPy version than the one pyhailort expects. That kind of mismatch is what error 6 (HAILO_INVALID_OPERATION), together with a reported input buffer size of 0, usually points to in HailoRT.
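One quick way to confirm is to compare what the interpreter sees inside versus outside the venv. A minimal sketch (the script name and package list are just my guess at what your venv pulled in; a NumPy difference is the usual suspect):

# version_check.py (hypothetical helper) - run once inside the venv, once outside, then diff the output
import importlib.metadata as md
import sys

print("python:", sys.version.split()[0], "| prefix:", sys.prefix)
for pkg in ("numpy", "opencv-python", "tensorflow", "keras"):
    try:
        print(f"{pkg}: {md.version(pkg)}")
    except md.PackageNotFoundError:
        print(f"{pkg}: not installed")

If NumPy (or OpenCV) differs between the two runs, pinning the venv to the version that works system-wide is the fastest way to test the theory.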

Here’s how you can debug this:

  1. First, verify your input dimensions against what the model actually expects. With the InferModel API you can read the expected input shape directly (add this after creating infer_model, before set_buffer):

expected_shape = infer_model.inputs[0].shape   # e.g. (112, 112, 3) -> 37632 bytes for uint8
expected_size = int(np.prod(expected_shape))
print(f"Expected input buffer size: {expected_size}")

# Add this check before inference
if input_tensor.nbytes != expected_size:
    raise ValueError(f"Buffer size mismatch: expected {expected_size}, got {input_tensor.nbytes}")

  2. If you’re using the async API, double-check that your input buffer is properly initialized and contiguous before it is bound - a zero buffer size often means the buffer was never properly attached or the stream isn’t ready yet (see the sketch right after this list).
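For point 2, here is a minimal sketch of what I mean, reusing the variable names from your script and the expected_size check from step 1 (the uint8 assumption matches what your create_output_buffers already uses):

# Just before bindings.input().set_buffer(...):
# force a C-contiguous uint8 array so the binding sees the full 37632 bytes
input_tensor = np.ascontiguousarray(input_tensor, dtype=np.uint8)
if input_tensor.nbytes != expected_size:
    raise ValueError(f"Still mismatched: {input_tensor.nbytes} vs {expected_size}")
bindings.input().set_buffer(input_tensor)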

Try these steps and let me know how it goes! If you need more info, the HailoRT User Guide has some helpful details about this - check out pages 134-136.