Fix race condition

Set AIAvailabilityEnum.ENABLED only at the end of model conversion, after the converted model bytes have been stored.
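
The window being closed: ENABLED was previously published before _converted_model_bytes was assigned, so a reader that saw ENABLED could still observe stale or missing bytes. A minimal, self-contained Python sketch of that window (the consumer loop and the plain module-level variables are illustrative stand-ins, not this project's API):

    import threading
    import time

    model_bytes = None      # stands in for _converted_model_bytes
    status = "DISABLED"     # stands in for ai_availability_status

    def worker_buggy():
        # Old ordering: flag first, data second.
        global model_bytes, status
        status = "ENABLED"
        time.sleep(0.01)            # conversion/upload still finishing
        model_bytes = b"engine"

    def consumer():
        while status != "ENABLED":
            time.sleep(0.001)
        # With the old ordering this can print None despite ENABLED.
        print("bytes seen at ENABLED:", model_bytes)

    c = threading.Thread(target=consumer)
    t = threading.Thread(target=worker_buggy)
    c.start(); t.start()
    t.join(); c.join()

Storing the bytes first and setting ENABLED as the final step makes the flag a reliable publication signal: in CPython the GIL keeps the two assignments ordered for any reader that checks the flag before touching the data.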
Oleksandr Bezdieniezhnykh
2025-09-04 15:39:43 +03:00
parent be77a81875
commit 7d68f7faee
4 changed files with 7 additions and 4 deletions
+2 -1
@@ -14,12 +14,13 @@ cdef class Inference:
     cdef dict[str, list(Detection)] _tile_detections
     cdef AIRecognitionConfig ai_config
     cdef bint stop_signal
-    cdef AIAvailabilityStatus ai_availability_status
+    cdef public AIAvailabilityStatus ai_availability_status
     cdef str model_input
     cdef int model_width
     cdef int model_height
+    cdef bytes _converted_model_bytes

     cdef bytes get_onnx_engine_bytes(self)
     cdef convert_and_upload_model(self, bytes onnx_engine_bytes, str engine_filename)
     cdef init_ai(self)
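
The switch to cdef public matters here: a plain cdef attribute is reachable only from Cython code, while cdef public makes Cython generate a Python-level getter/setter, so code outside the extension type (for instance, whatever now polls the availability status) can read it directly. A minimal Cython illustration with demo names only:

    cdef class Box:
        cdef int hidden          # Cython-only: box.hidden raises AttributeError
        cdef public int shown    # Cython emits a Python property for this

        def __init__(self):
            self.hidden = 1
            self.shown = 2

From Python, Box().shown returns 2, while Box().hidden raises AttributeError.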
+1
@@ -87,6 +87,7 @@ cdef class Inference:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.WARNING, <str>f"Failed to upload converted model: {res.err}")
             self._converted_model_bytes = model_bytes
+            self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
         except Exception as e:
             self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str> str(e))
             self._converted_model_bytes = None
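
A hypothetical flag-then-data reader that this ordering makes safe (the .status accessor and the helper are assumptions for illustration; only set_status and the attribute names appear in this diff):

    def get_engine_if_ready(inference):
        # ENABLED is now set only after _converted_model_bytes is assigned,
        # so observing ENABLED guarantees the bytes are already in place.
        if inference.ai_availability_status.status == AIAvailabilityEnum.ENABLED:
            return inference._converted_model_bytes
        return None

The except branch resets both sides on failure: the status goes to ERROR and the bytes are cleared, so a reader never holds a stale engine after an error.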
+3 -2
@@ -21,10 +21,11 @@ cdef class CommandProcessor:
     def __init__(self, int zmq_port, str loader_zmq_host, int loader_zmq_port, str api_url):
         self.remote_handler = RemoteCommandHandler(zmq_port, self.on_command)
         self.inference_queue = Queue(maxsize=constants_inf.QUEUE_MAXSIZE)
-        self.remote_handler.start()
-        self.running = True
         self.loader_client = LoaderClient(loader_zmq_host, loader_zmq_port)
+        self.inference = Inference(self.loader_client, self.remote_handler)
+        self.running = True
+        self.remote_handler.start()

     def start(self):
         while self.running:
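
The __init__ reordering closes a second window: RemoteCommandHandler is wired to self.on_command and presumably dispatches incoming commands on its own thread, so starting it before self.inference exists lets an early command reach a half-built object. A runnable stand-in showing the shape of that race (Handler and Processor here are illustrative, not the project's classes):

    import threading

    class Handler:
        # Stand-in for RemoteCommandHandler: fires the callback from a thread.
        def __init__(self, on_command):
            self._on_command = on_command
        def start(self):
            threading.Thread(target=self._on_command, args=("ping",)).start()

    class Processor:
        def __init__(self, fixed=True):
            self.handler = Handler(self.on_command)
            if not fixed:
                self.handler.start()   # old order: callback may fire here
            self.inference = object()  # slow construction happens in this gap
            self.running = True
            if fixed:
                self.handler.start()   # new order: start only when fully built
        def on_command(self, cmd):
            # With fixed=False this can raise AttributeError because
            # self.inference is not assigned yet.
            print(cmd, self.inference)

    Processor(fixed=True)

Making remote_handler.start() the last statement of __init__ guarantees that every attribute the command callback touches is initialized before the first command can arrive.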