fix some cython code: route all engines through EngineClass and add explicit C-type casts

Author: Oleksandr Bezdieniezhnykh
Date:   2026-03-29 21:18:18 +03:00
parent ad5530b9ef
commit 6269a7485c
32 changed files with 17108 additions and 2728 deletions
+43 -26
@@ -8,28 +8,42 @@ cimport constants_inf
 from ai_availability_status cimport AIAvailabilityEnum, AIAvailabilityStatus
 from annotation cimport Detection, Annotation
 from ai_config cimport AIRecognitionConfig
 from engines.inference_engine cimport InferenceEngine
 from loader_http_client cimport LoaderHttpClient
 from threading import Thread
-from engines import tensor_gpu_index, create_engine
-if tensor_gpu_index > -1:
-    from engines.tensorrt_engine import TensorRTEngine
+from engines import EngineClass

 cdef class Inference:
     cdef LoaderHttpClient loader_client
     cdef InferenceEngine engine
     cdef object _annotation_callback
     cdef object _status_callback
     cdef Annotation _previous_annotation
     cdef dict[str, list[Detection]] _tile_detections
     cdef dict[str, int] detection_counts
     cdef AIRecognitionConfig ai_config
     cdef bint stop_signal
     cdef public AIAvailabilityStatus ai_availability_status
     cdef str model_input
     cdef int model_width
     cdef int model_height
     cdef bytes _converted_model_bytes
     cdef bint is_building_engine

     def __init__(self, loader_client):
         self.loader_client = loader_client
         self._annotation_callback = None
         self._status_callback = None
-        self.stop_signal = False
-        self.model_input = None
+        self.stop_signal = <bint>False
+        self.model_input = <str>None
         self.model_width = 0
         self.model_height = 0
         self.detection_counts = {}
-        self.engine = None
-        self.is_building_engine = False
+        self.engine = <InferenceEngine>None
+        self.is_building_engine = <bint>False
         self.ai_availability_status = AIAvailabilityStatus()
-        self._converted_model_bytes = None
+        self._converted_model_bytes = <bytes>None
         self.init_ai()

     @property
@@ -49,7 +63,7 @@ cdef class Inference:
         try:
             self.ai_availability_status.set_status(AIAvailabilityEnum.CONVERTING)
             models_dir = constants_inf.MODELS_FOLDER
-            model_bytes = TensorRTEngine.convert_from_onnx(onnx_engine_bytes)
+            model_bytes = EngineClass.convert_from_onnx(onnx_engine_bytes)
             self.ai_availability_status.set_status(AIAvailabilityEnum.UPLOADING)
             res = self.loader_client.upload_big_small_resource(model_bytes, engine_filename, models_dir)
@@ -60,9 +74,9 @@ cdef class Inference:
             self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
         except Exception as e:
             self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str> str(e))
-            self._converted_model_bytes = None
+            self._converted_model_bytes = <bytes>None
         finally:
-            self.is_building_engine = False
+            self.is_building_engine = <bint>False

     cdef init_ai(self):
         constants_inf.log(<str> 'init AI...')
@@ -74,24 +88,24 @@ cdef class Inference:
         if self._converted_model_bytes is not None:
             try:
-                self.engine = TensorRTEngine(self._converted_model_bytes)
+                self.engine = EngineClass(self._converted_model_bytes)
                 self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
                 self.model_height, self.model_width = self.engine.get_input_shape()
             except Exception as e:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str> str(e))
             finally:
-                self._converted_model_bytes = None  # Consume the bytes
+                self._converted_model_bytes = <bytes>None
             return

         models_dir = constants_inf.MODELS_FOLDER
-        if tensor_gpu_index > -1:
+        engine_filename = EngineClass.get_engine_filename()
+        if engine_filename is not None:
             try:
-                engine_filename = TensorRTEngine.get_engine_filename(0)
                 self.ai_availability_status.set_status(AIAvailabilityEnum.DOWNLOADING)
                 res = self.loader_client.load_big_small_resource(engine_filename, models_dir)
                 if res.err is not None:
                     raise Exception(res.err)
-                self.engine = TensorRTEngine(res.data)
+                self.engine = EngineClass(res.data)
                 self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
             except Exception as e:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.WARNING, <str>str(e))
@@ -103,7 +117,8 @@ cdef class Inference:
                 thread.start()
                 return
         else:
-            self.engine = create_engine(<bytes>self.get_onnx_engine_bytes())
+            self.engine = EngineClass(<bytes>self.get_onnx_engine_bytes())
+            self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
         self.is_building_engine = False
         self.model_height, self.model_width = self.engine.get_input_shape()
@@ -178,7 +193,7 @@ cdef class Inference:
cdef bint is_video(self, str filepath):
mime_type, _ = mimetypes.guess_type(<str>filepath)
return mime_type and mime_type.startswith("video")
return <bint>(mime_type and mime_type.startswith("video"))
cdef split_list_extend(self, lst, chunk_size):
chunks = [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
@@ -200,7 +215,7 @@ cdef class Inference:
         self._annotation_callback = annotation_callback
         self._status_callback = status_callback
-        self.stop_signal = False
+        self.stop_signal = <bint>False
         self.init_ai()
         if self.engine is None:
             constants_inf.log(<str> "AI engine not available. Conversion may be in progress. Skipping inference.")
@@ -246,9 +261,9 @@ cdef class Inference:
         cdef int frame_count = 0
         cdef int batch_count = 0
         cdef list batch_frames = []
-        cdef list[int] batch_timestamps = []
+        cdef list[long] batch_timestamps = []
         cdef Annotation annotation
-        self._previous_annotation = None
+        self._previous_annotation = <Annotation>None

         v_input = cv2.VideoCapture(<str>video_name)
         if not v_input.isOpened():
@@ -267,7 +282,7 @@ cdef class Inference:
             frame_count += 1
             if frame_count % ai_config.frame_period_recognition == 0:
                 batch_frames.append(frame)
-                batch_timestamps.append(int(v_input.get(cv2.CAP_PROP_POS_MSEC)))
+                batch_timestamps.append(<long>v_input.get(cv2.CAP_PROP_POS_MSEC))
                 if len(batch_frames) == self.engine.get_batch_size():
                     batch_count += 1
@@ -308,7 +323,8 @@ cdef class Inference:
             self.detection_counts[annotation.original_media_name] = self.detection_counts.get(annotation.original_media_name, 0) + 1
         if self._annotation_callback is not None:
             percent = int(frame_count * 100 / total_frames) if total_frames > 0 else 0
-            self._annotation_callback(annotation, percent)
+            cb = self._annotation_callback
+            cb(annotation, percent)

     cdef _process_images(self, AIRecognitionConfig ai_config, list[str] image_paths):
         cdef list frame_data
@@ -343,8 +359,9 @@ cdef class Inference:
     cdef send_detection_status(self):
         if self._status_callback is not None:
+            cb = self._status_callback
             for media_name in self.detection_counts.keys():
-                self._status_callback(media_name, self.detection_counts[media_name])
+                cb(media_name, self.detection_counts[media_name])
             self.detection_counts.clear()

     cdef split_to_tiles(self, frame, path, tile_size, overlap_percent):
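
Note on the recurring idioms in this diff: direct uses of TensorRTEngine/create_engine are routed through the EngineClass alias picked at import time, attribute assignments gain explicit C-type casts (<bint>, <str>, <bytes>, <long>, <InferenceEngine>), and callback attributes are copied into a local variable before being invoked. Below is a minimal standalone sketch of the cast and callback-caching patterns; the Worker class and all of its names are hypothetical, not from this repo. The casts are mostly documentation (Cython coerces a bare False or None to the declared C type anyway), while caching the callback in a local avoids re-reading the attribute on every loop iteration.

# cython: language_level=3
# Hypothetical sketch of the patterns used in the diff above; not project code.

cdef class Worker:
    cdef bint stop_signal
    cdef object _status_callback  # Python callable or None
    cdef dict counts

    def __init__(self):
        # Explicit cast mirrors the diff's style; a bare False would
        # be coerced to bint automatically.
        self.stop_signal = <bint>False
        self._status_callback = None
        self.counts = {}

    cdef send_status(self):
        if self._status_callback is not None:
            # Read the attribute once and reuse the local inside the loop,
            # instead of looking up self._status_callback per iteration.
            cb = self._status_callback
            for name in self.counts.keys():
                cb(name, self.counts[name])
            self.counts.clear()

The <long> cast on the CAP_PROP_POS_MSEC read follows the same idea: VideoCapture.get() returns a Python float, and the cast truncates it to a C long, matching the old int(...) conversion.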