mirror of
https://github.com/azaion/detections.git
synced 2026-04-22 08:56:32 +00:00
[AZ-180] Enhance setup and improve inference logging
- Added a new Cython extension for the engine factory to the setup configuration.
- Updated the inference module to include additional logging for video batch processing and annotation callbacks.
- Refactored test cases to standardize the detection endpoint responses and include channel IDs in headers for better event handling.
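The setup change itself is not shown in this diff; below is a minimal sketch of the kind of entry it likely adds. The package name, extension name, and .pyx path are assumptions, not taken from the commit.

# Hypothetical setup.py fragment -- names and paths are assumptions.
from setuptools import setup, Extension
from Cython.Build import cythonize

setup(
    name="detections",  # assumed package name
    ext_modules=cythonize(
        [Extension("engine_factory", ["engine_factory.pyx"])],
        compiler_directives={"language_level": "3"},
    ),
)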
@@ -0,0 +1,116 @@
import os
import tempfile
from loader_http_client cimport LoaderHttpClient, LoadResult

class EngineFactory:
    # Backends that can compile an engine from ONNX set this to True.
    has_build_step = False

    def create(self, model_bytes: bytes):
        raise NotImplementedError

    def load_engine(self, LoaderHttpClient loader_client, str models_dir):
        # Try to fetch a prebuilt engine artifact; return None when the
        # backend has no prebuilt artifact or the download fails.
        cdef str filename
        cdef LoadResult res
        filename = self._get_ai_engine_filename()
        if filename is None:
            return None
        try:
            res = loader_client.load_big_small_resource(filename, models_dir)
            if res.err is None:
                return self.create(res.data)
        except Exception:
            pass
        return None

    def _get_ai_engine_filename(self):
        return None

    def get_source_filename(self):
        return None

    def build_from_source(self, onnx_bytes, loader_client, models_dir):
        raise NotImplementedError(f"{type(self).__name__} does not support building from source")
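The base class is a template: subclasses override create() plus, optionally, _get_ai_engine_filename() for a prebuilt artifact or get_source_filename()/build_from_source() for local builds. As a purely hypothetical illustration (the OpenVINO names below are not part of this commit), a new backend would plug in like this:

# Hypothetical example of adding a backend -- all names are illustrative only.
class OpenVINOEngineFactory(EngineFactory):
    def create(self, model_bytes: bytes):
        from engines.openvino_engine import OpenVINOEngine  # assumed module
        return OpenVINOEngine(model_bytes)

    def _get_ai_engine_filename(self):
        # Prebuilt artifact fetched by load_engine(); filename is an assumption.
        return "azaion_openvino.zip"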

class OnnxEngineFactory(EngineFactory):
    def create(self, model_bytes: bytes):
        from engines.onnx_engine import OnnxEngine
        return OnnxEngine(model_bytes)

    def get_source_filename(self):
        import constants_inf
        return constants_inf.AI_ONNX_MODEL_FILE

class CoreMLEngineFactory(EngineFactory):
    def create(self, model_bytes: bytes):
        from engines.coreml_engine import CoreMLEngine
        return CoreMLEngine(model_bytes)

    def _get_ai_engine_filename(self):
        return "azaion_coreml.zip"

class TensorRTEngineFactory(EngineFactory):
    has_build_step = True

    def create(self, model_bytes: bytes):
        from engines.tensorrt_engine import TensorRTEngine
        return TensorRTEngine(model_bytes)

    def _get_ai_engine_filename(self):
        from engines.tensorrt_engine import TensorRTEngine
        return TensorRTEngine.get_engine_filename()

    def get_source_filename(self):
        import constants_inf
        return constants_inf.AI_ONNX_MODEL_FILE

    def build_from_source(self, onnx_bytes, loader_client, models_dir):
        from engines.tensorrt_engine import TensorRTEngine
        engine_bytes = TensorRTEngine.convert_from_source(onnx_bytes, None)
        return engine_bytes, TensorRTEngine.get_engine_filename()

class JetsonTensorRTEngineFactory(TensorRTEngineFactory):
    def create(self, model_bytes: bytes):
        from engines.jetson_tensorrt_engine import JetsonTensorRTEngine
        return JetsonTensorRTEngine(model_bytes)

    def _get_ai_engine_filename(self):
        from engines.tensorrt_engine import TensorRTEngine
        return TensorRTEngine.get_engine_filename("int8")

    def build_from_source(self, onnx_bytes, LoaderHttpClient loader_client, str models_dir):
        # Build an INT8 engine, using the calibration cache when available;
        # the temporary cache file is always cleaned up afterwards.
        cdef str calib_cache_path
        from engines.tensorrt_engine import TensorRTEngine
        calib_cache_path = self._download_calib_cache(loader_client, models_dir)
        try:
            engine_bytes = TensorRTEngine.convert_from_source(onnx_bytes, calib_cache_path)
            return engine_bytes, TensorRTEngine.get_engine_filename("int8")
        finally:
            if calib_cache_path is not None:
                try:
                    os.unlink(calib_cache_path)
                except Exception:
                    pass

    def _download_calib_cache(self, LoaderHttpClient loader_client, str models_dir):
        cdef LoadResult res
        import constants_inf
        try:
            res = loader_client.load_big_small_resource(
                constants_inf.INT8_CALIB_CACHE_FILE, models_dir
            )
            if res.err is not None:
                constants_inf.log(f"INT8 calibration cache not available: {res.err}")
                return None
            fd, path = tempfile.mkstemp(suffix=".cache")
            with os.fdopen(fd, "wb") as f:
                f.write(res.data)
            constants_inf.log("INT8 calibration cache downloaded")
            return path
        except Exception as e:
            constants_inf.log(f"INT8 calibration cache download failed: {str(e)}")
            return None
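Taken together, the factories support a two-step startup: try to download a prebuilt engine, and, for factories with has_build_step set, fall back to fetching the ONNX source and building locally. A minimal sketch of how a caller might drive this flow, assuming a loader_client and models_dir already exist (neither the loader construction nor the factory selection is shown in this commit):

# Hedged usage sketch -- loader setup and factory choice are assumptions.
factory = TensorRTEngineFactory()

engine = factory.load_engine(loader_client, models_dir)  # try prebuilt engine first
if engine is None and factory.has_build_step:
    src_name = factory.get_source_filename()             # e.g. the ONNX model file
    res = loader_client.load_big_small_resource(src_name, models_dir)
    if res.err is None:
        engine_bytes, engine_name = factory.build_from_source(
            res.data, loader_client, models_dir
        )
        engine = factory.create(engine_bytes)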