diff --git a/Azaion.Common/Services/Inference/InferenceClient.cs b/Azaion.Common/Services/Inference/InferenceClient.cs
index 89da10a..2e612b9 100644
--- a/Azaion.Common/Services/Inference/InferenceClient.cs
+++ b/Azaion.Common/Services/Inference/InferenceClient.cs
@@ -49,7 +49,7 @@ public class InferenceClient : IInferenceClient
                 Arguments = $"-p {_inferenceClientConfig.ZeroMqPort} -lp {_loaderClientConfig.ZeroMqPort} -a {_inferenceClientConfig.ApiUrl}",
                 CreateNoWindow = true
             };
-            //process.Start();
+            process.Start();
         }
         catch (Exception e)
         {
diff --git a/Azaion.Inference/inference.pxd b/Azaion.Inference/inference.pxd
index ebafc21..76b3150 100644
--- a/Azaion.Inference/inference.pxd
+++ b/Azaion.Inference/inference.pxd
@@ -14,12 +14,13 @@ cdef class Inference:
     cdef dict[str, list(Detection)] _tile_detections
     cdef AIRecognitionConfig ai_config
     cdef bint stop_signal
-    cdef AIAvailabilityStatus ai_availability_status
+    cdef public AIAvailabilityStatus ai_availability_status
     cdef str model_input
     cdef int model_width
     cdef int model_height
+    cdef bytes _converted_model_bytes

     cdef bytes get_onnx_engine_bytes(self)
     cdef convert_and_upload_model(self, bytes onnx_engine_bytes, str engine_filename)
     cdef init_ai(self)
diff --git a/Azaion.Inference/inference.pyx b/Azaion.Inference/inference.pyx
index 2e04ee1..056a4c7 100644
--- a/Azaion.Inference/inference.pyx
+++ b/Azaion.Inference/inference.pyx
@@ -87,6 +87,7 @@ cdef class Inference:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.WARNING,
                     f"Failed to upload converted model: {res.err}")
             self._converted_model_bytes = model_bytes
+            self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
         except Exception as e:
             self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, str(e))
             self._converted_model_bytes = None
diff --git a/Azaion.Inference/main_inference.pyx b/Azaion.Inference/main_inference.pyx
index 55f0371..766d90b 100644
--- a/Azaion.Inference/main_inference.pyx
+++ b/Azaion.Inference/main_inference.pyx
@@ -21,10 +21,11 @@ cdef class CommandProcessor:
     def __init__(self, int zmq_port, str loader_zmq_host, int loader_zmq_port, str api_url):
         self.remote_handler = RemoteCommandHandler(zmq_port, self.on_command)
         self.inference_queue = Queue(maxsize=constants_inf.QUEUE_MAXSIZE)
-        self.remote_handler.start()
-        self.running = True
         self.loader_client = LoaderClient(loader_zmq_host, loader_zmq_port)
         self.inference = Inference(self.loader_client, self.remote_handler)
+        self.running = True
+        self.remote_handler.start()
+
     def start(self):
         while self.running: