I have two TensorFlow UNet models, each with input shape (224, 224, 3) and output shape (224, 224, 1), and I have converted both to ONNX. I want to combine them into a single model, i.e. one common input feeding two separate networks. I do it like this:
# Join the two runners so both networks share a common input
main_runner.join(runner, join_action=JoinAction.AUTO_JOIN_INPUTS)

model_script_commands = [
    "model_optimization_flavor(optimization_level=2)\n",
    "post_quantization_optimization(finetune, policy=enabled, learning_rate=0.001, epochs=5, batch_size=16)\n",
]
opt_alls = ''.join(model_script_commands)
main_runner.load_model_script(opt_alls)
main_runner.optimize(calib_dataset)
Everything works, it makes a .har file and then a .hef file.
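For context, the two runners are created along these lines (a minimal sketch; the hw_arch, file names, model names, and the random calibration data are placeholders for my actual setup):

import numpy as np
from hailo_sdk_client import ClientRunner  # JoinAction's exact import path may differ between DFC versions

# Parse each ONNX UNet into its own runner (paths and model names are placeholders)
main_runner = ClientRunner(hw_arch='hailo8')
main_runner.translate_onnx_model('unet_a.onnx', 'unet_a')

runner = ClientRunner(hw_arch='hailo8')
runner.translate_onnx_model('unet_b.onnx', 'unet_b')

# Calibration data: N samples matching the common (224, 224, 3) input
calib_dataset = np.random.rand(64, 224, 224, 3).astype(np.float32)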
However, when I add input normalization to the model script:
# Same join as before, but now with an input normalization command
main_runner.join(runner, join_action=JoinAction.AUTO_JOIN_INPUTS)

model_script_commands = [
    "normalization1 = normalization([127.5, 127.5, 127.5], [127.5, 127.5, 127.5])\n",
    "model_optimization_flavor(optimization_level=2)\n",
    "post_quantization_optimization(finetune, policy=enabled, learning_rate=0.001, epochs=5, batch_size=16)\n",
]
opt_alls = ''.join(model_script_commands)
main_runner.load_model_script(opt_alls)
main_runner.optimize(calib_dataset)
The .har file is still created correctly, but when I compile to .hef by calling:
hef = main_runner.compile()
I get an error:
[error] Failed to produce compiled graph
Node: edge/sh_from_normalization1_to_conv1_sd0-3 already exists in graph
The full error message:
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/runner/client_runner.py:715, in ClientRunner.compile(self)
705 def compile(self):
706 """DFC API for compiling current model to Hailo hardware.
707
708 Returns:
(...)
713 >>> compiled_model = runner.compile()
714 """
--> 715 return self._compile()
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_common/states/states.py:16, in allowed_states.<locals>.wrap.<locals>.wrapped_func(self, *args, **kwargs)
13 if self._state not in states:
14 raise InvalidStateException("The execution of {} is not available under the state: "
15 "{}".format(func.__name__, self._state.value))
---> 16 return func(self, *args, **kwargs)
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/runner/client_runner.py:832, in ClientRunner._compile(self, fps, mapping_timeout, allocator_script_filename)
828 self._logger.warning(f'Taking model script commands from {allocator_script_filename} and ignoring '
829 f'previous allocation script commands')
830 self.load_model_script(allocator_script_filename)
--> 832 serialized_hef = self._sdk_backend.compile(fps, self.model_script, mapping_timeout)
834 self._auto_model_script = self._sdk_backend.get_auto_alls()
835 self._state = States.COMPILED_MODEL
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/sdk_backend/sdk_backend.py:1440, in SdkBackendCompilation.compile(self, fps, allocator_script, mapping_timeout)
1438 def compile(self, fps, allocator_script=None, mapping_timeout=None):
1439 self._model.fill_default_quantization_params(logger=self._logger)
-> 1440 hef, mapped_graph_file = self._compile(fps, allocator_script, mapping_timeout)
1441 # TODO: https://hailotech.atlassian.net/browse/SDK-31038
1442 if not SDKPaths().is_internal:
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/sdk_backend/sdk_backend.py:1434, in SdkBackendCompilation._compile(self, fps, allocator_script, mapping_timeout)
1429 if not model_params and self.requires_quantized_weights:
1430 raise BackendRuntimeException(
1431 'Model requires quantized weights in order to run on HW, but none were given. '
1432 'Did you forget to quantize?')
-> 1434 hef, mapped_graph_file, auto_alls = self.hef_full_build(fps, mapping_timeout, model_params, allocator_script)
1435 self._auto_alls = auto_alls
1436 return hef, mapped_graph_file
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/sdk_backend/sdk_backend.py:1408, in SdkBackendCompilation.hef_full_build(self, fps, mapping_timeout, params, allocator_script)
1406 config_paths = ConfigPaths(self._hw_arch, self._model.name)
1407 config_paths.set_stage('inference')
-> 1408 auto_alls, self._mapped_graph, self._integrated_graph = allocator.create_mapping_and_full_build_hef(
1409 config_paths.get_path('network_graph'),
1410 config_paths.get_path('mapped_graph'),
1411 config_paths.get_path('compilation_output_proto'),
1412 params=params,
1413 allocator_script=allocator_script,
1414 compiler_statistics_path=config_paths.get_path('compiler_statistics'),
1415 nms_metadata=self._nms_metadata,
1416 har=self.har,
1417 alls_ignore_invalid_cmds=self._alls_ignore_invalid_cmds)
1419 hef_client_server_proto = client_server_api_pb2.ProtoClientServerApiHef()
1420 hef_client_server_proto.ParseFromString(self._mapped_graph)
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/allocator/hailo_tools_runner.py:598, in HailoToolsRunner.create_mapping_and_full_build_hef(self, network_graph_path, output_path, compilation_output_proto, agent, strategy, auto_mapping, params, expected_output_tensor, expected_pre_acts, allocator_script, allocator_script_mode, compiler_statistics_path, nms_metadata, har, alls_ignore_invalid_cmds)
594 if self.hn.net_params.clusters_placement != [[]]:
595 assert len(self.hn.net_params.clusters_placement) <= self._number_of_clusters, (
596 "Number of clusters in layer placements is larger than allowed number of clusters")
--> 598 self.call_builder(network_graph_path, output_path, compilation_output_proto=compilation_output_proto,
599 agent=agent, strategy=strategy, exit_point=BuilderExitPoint.POST_CAT, params=params,
600 expected_output_tensor=expected_output_tensor, expected_pre_acts=expected_pre_acts,
601 allocator_script=allocator_script, allocator_script_mode=allocator_script_mode,
602 compiler_statistics_path=compiler_statistics_path, nms_metadata=nms_metadata, har=har,
603 alls_ignore_invalid_cmds=alls_ignore_invalid_cmds)
605 return self._auto_alls, self._output_integrated_pb_map, self._output_integrated_pb_graph
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/allocator/hailo_tools_runner.py:556, in HailoToolsRunner.call_builder(self, network_graph_path, output_path, blind_deserialize, **kwargs)
554 sys.excepthook = _hailo_tools_exception_hook
555 try:
--> 556 self.run_builder(network_graph_path, output_path, **kwargs)
557 except BackendInternalException:
558 try:
File /local/workspace/hailo_virtualenv/lib/python3.8/site-packages/hailo_sdk_client/allocator/hailo_tools_runner.py:420, in HailoToolsRunner.run_builder(self, network_graph_filename, output_filename, compilation_output_proto, agent, strategy, exit_point, params, expected_output_tensor, expected_pre_acts, allocator_script, allocator_script_mode, compiler_statistics_path, is_debug, nms_metadata, har, alls_ignore_invalid_cmds)
418 compiler_msg = e.hailo_tools_error
419 if compiler_msg:
--> 420 raise e.internal_exception("Compilation failed:", hailo_tools_error=compiler_msg) from None
421 else:
422 raise e.internal_exception("Compilation failed with unexpected crash") from None
BackendAllocatorException: Compilation failed: Node: edge/sh_from_normalization1_to_conv1_sd0-3 already exists in graph
When I do the same for a single network (not joined), everything works fine, with or without the normalization.
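For reference, the single-network version looks roughly like this (single_runner is a placeholder for a runner holding just one of the translated models); it compiles to .hef without issues:

# Same model script, applied to one un-joined network (single_runner is a placeholder name)
model_script_commands = [
    "normalization1 = normalization([127.5, 127.5, 127.5], [127.5, 127.5, 127.5])\n",
    "model_optimization_flavor(optimization_level=2)\n",
    "post_quantization_optimization(finetune, policy=enabled, learning_rate=0.001, epochs=5, batch_size=16)\n",
]
single_runner.load_model_script(''.join(model_script_commands))
single_runner.optimize(calib_dataset)
hef = single_runner.compile()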
How can I resolve this?