Mirror of https://github.com/azaion/annotations.git
Synced 2026-04-22 18:26:31 +00:00
Commit: add loader and versioning
This commit is contained in:
@@ -0,0 +1,67 @@
|
||||
import queue
|
||||
import traceback
|
||||
from queue import Queue
|
||||
cimport constants
|
||||
from threading import Thread
|
||||
|
||||
from annotation cimport Annotation
|
||||
from inference cimport Inference
|
||||
from loader_client cimport LoaderClient
|
||||
from remote_command cimport RemoteCommand, CommandType
|
||||
from remote_command_handler cimport RemoteCommandHandler
|
||||
|
||||
cdef class CommandProcessor:
    """Central command dispatcher for the inference service.

    Receives remote commands over ZMQ (via RemoteCommandHandler), queues
    inference requests, executes them sequentially on the thread that calls
    start(), and streams annotation results back to the requesting client.
    """

    cdef RemoteCommandHandler remote_handler  # ZMQ transport; invokes on_command for each incoming command
    cdef object inference_queue               # queue.Queue of pending INFERENCE commands (bounded by constants.QUEUE_MAXSIZE)
    cdef bint running                         # main-loop flag; cleared by stop()
    cdef Inference inference                  # inference engine; results delivered through on_annotation
    cdef LoaderClient loader_client           # connection to the model loader service

    def __init__(self, int zmq_port, str loader_zmq_host, int loader_zmq_port, str api_url):
        """Wire up the ZMQ handler, the loader client and the inference engine.

        NOTE(review): api_url is accepted but never used in this class —
        kept for interface compatibility; confirm whether it can be dropped
        or should be forwarded somewhere.
        """
        self.remote_handler = RemoteCommandHandler(zmq_port, self.on_command)
        self.inference_queue = Queue(maxsize=constants.QUEUE_MAXSIZE)
        self.remote_handler.start()
        self.running = True
        self.loader_client = LoaderClient(loader_zmq_host, loader_zmq_port)
        self.inference = Inference(self.loader_client, self.on_annotation)

    def start(self):
        """Blocking worker loop: drain queued inference commands until stop().

        Each command is run to completion, then a DONE marker is sent to the
        originating client. Unexpected errors are logged and the loop keeps
        serving subsequent commands.
        """
        while self.running:
            try:
                # Short timeout so a stop() request is noticed promptly.
                command = self.inference_queue.get(timeout=0.5)
                self.inference.run_inference(command)
                end_inference_command = RemoteCommand(CommandType.INFERENCE_DATA, None, 'DONE')
                self.remote_handler.send(command.client_id, end_inference_command.serialize())
            except queue.Empty:
                continue
            except Exception:
                # Keep the worker alive on per-command failures; log for diagnosis.
                traceback.print_exc()
        print('EXIT!')

    cdef on_command(self, RemoteCommand command):
        """ZMQ callback: route an incoming command by its type.

        INFERENCE is queued for the worker loop; availability checks,
        stop-inference and EXIT are handled inline. Unknown types are ignored.
        """
        try:
            if command.command_type == CommandType.INFERENCE:
                self.inference_queue.put(command)
            elif command.command_type == CommandType.AI_AVAILABILITY_CHECK:
                # Build the tensor engine, then reply with the resulting status.
                self.inference.build_tensor_engine(lambda status: self.remote_handler.send(command.client_id,
                    RemoteCommand(CommandType.AI_AVAILABILITY_RESULT, None, status).serialize()))
            elif command.command_type == CommandType.STOP_INFERENCE:
                self.inference.stop()
            elif command.command_type == CommandType.EXIT:
                # Shut down on a separate thread so this ZMQ callback returns
                # immediately and does not deadlock the handler it is stopping.
                t = Thread(target=self.stop)  # non-block worker
                t.start()
        except Exception as e:
            print(f"Error handling client: {e}")

    cdef on_annotation(self, RemoteCommand cmd, Annotation annotation):
        """Inference callback: forward one annotation result to the client
        that issued the originating command."""
        cdef RemoteCommand response = RemoteCommand(CommandType.INFERENCE_DATA, annotation.serialize())
        self.remote_handler.send(cmd.client_id, response.serialize())

    def stop(self):
        """Stop inference, the ZMQ handler and the loader client, then let
        the start() loop exit on its next timeout check."""
        self.inference.stop()
        self.remote_handler.stop()
        self.loader_client.stop()
        self.running = False
|
||||
Reference in New Issue
Block a user