Refactor type casting in Cython files for improved clarity and consistency

- Updated various Cython files to explicitly cast types, enhancing type safety and readability.
- Adjusted the `engine_name` property in `InferenceEngine` and its subclasses to be set directly in the constructor.
- Modified the `request` method in `_SessionWithBase` to accept `*args` for better flexibility.
- Ensured proper type casting for return values in methods across multiple classes, including `Inference`, `CoreMLEngine`, and `TensorRTEngine`.

These changes aim to streamline the codebase and improve maintainability by enforcing consistent type usage.
This commit is contained in:
Oleksandr Bezdieniezhnykh
2026-03-30 06:17:16 +03:00
parent 3b30a17e11
commit fc57d677b4
16 changed files with 676 additions and 63 deletions
+21 -20
View File
@@ -63,7 +63,7 @@ cdef class Inference:
res = self.loader_client.load_big_small_resource(filename, models_dir)
if res.err is not None:
raise Exception(res.err)
-        return res.data
+        return <bytes>res.data
cdef convert_and_upload_model(self, bytes source_bytes, str engine_filename):
try:
@@ -120,7 +120,7 @@ cdef class Inference:
return
self.ai_availability_status.set_status(AIAvailabilityEnum.WARNING, <str>str(e))
source_bytes = self.download_model(source_filename)
-        self.is_building_engine = True
+        self.is_building_engine = <bint>True
thread = Thread(target=self.convert_and_upload_model, args=(source_bytes, engine_filename))
thread.daemon = True
@@ -129,12 +129,12 @@ cdef class Inference:
else:
self.engine = EngineClass(<bytes>self.download_model(constants_inf.AI_ONNX_MODEL_FILE))
self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
-            self.is_building_engine = False
+            self.is_building_engine = <bint>False
self.model_height, self.model_width = self.engine.get_input_shape()
except Exception as e:
self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str>str(e))
-            self.is_building_engine = False
+            self.is_building_engine = <bint>False
cdef preprocess(self, frames):
@@ -171,7 +171,7 @@ cdef class Inference:
w = x2 - x1
h = y2 - y1
if conf >= ai_config.probability_threshold:
-                detections.append(Detection(x, y, w, h, class_id, conf))
+                detections.append(Detection(x, y, w, h, class_id, conf))  # type: ignore[call-arg]
filtered_detections = self.remove_overlapping_detections(detections, ai_config.tracking_intersection_threshold)
results.append(filtered_detections)
return results
@@ -320,7 +320,7 @@ cdef class Inference:
original_media_name = Path(<str>video_name).stem.replace(" ", "")
name = f'{original_media_name}_{constants_inf.format_time(batch_timestamps[i])}'
-        annotation = Annotation(name, original_media_name, batch_timestamps[i], detections)
+        annotation = Annotation(name, original_media_name, batch_timestamps[i], detections)  # type: ignore[call-arg]
if detections:
valid = self.is_valid_video_annotation(annotation, ai_config)
@@ -348,6 +348,7 @@ cdef class Inference:
cdef _process_images(self, AIRecognitionConfig ai_config, list[str] image_paths):
cdef list frame_data
cdef double ground_sampling_distance
+        self._tile_detections = {}
for path in image_paths:
frame_data = []
@@ -423,7 +424,7 @@ cdef class Inference:
list_detections = self.postprocess(outputs, ai_config)
for i in range(len(list_detections)):
-            annotation = Annotation(names[i], original_media_names[i], 0, list_detections[i])
+            annotation = Annotation(names[i], original_media_names[i], 0, list_detections[i])  # type: ignore[call-arg]
if self.is_valid_image_annotation(annotation, ground_sampling_distance, frames[i].shape):
constants_inf.log(<str> f'Detected {annotation}')
_, image = cv2.imencode('.jpg', frames[i])
@@ -431,7 +432,7 @@ cdef class Inference:
self.on_annotation(annotation)
cpdef stop(self):
-        self.stop_signal = True
+        self.stop_signal = <bint>True
cdef remove_tiled_duplicates(self, Annotation annotation):
right = annotation.name.rindex('!')
@@ -448,7 +449,7 @@ cdef class Inference:
for det in annotation.detections:
x1 = det.x * tile_size
y1 = det.y * tile_size
-            det_abs = Detection(x + x1, y + y1, det.w * tile_size, det.h * tile_size, det.cls, det.confidence)
+            det_abs = Detection(x + x1, y + y1, det.w * tile_size, det.h * tile_size, det.cls, det.confidence)  # type: ignore[call-arg]
if det_abs not in existing_abs_detections:
unique_detections.append(det)
@@ -478,23 +479,23 @@ cdef class Inference:
annotation.detections = valid_detections
if not annotation.detections:
-            return False
-        return True
+            return <bint>False
+        return <bint>True
cdef bint is_valid_video_annotation(self, Annotation annotation, AIRecognitionConfig ai_config):
if constants_inf.SPLIT_SUFFIX in annotation.name:
self.remove_tiled_duplicates(annotation)
if not annotation.detections:
-            return False
+            return <bint>False
if self._previous_annotation is None:
-            return True
+            return <bint>True
if annotation.time >= self._previous_annotation.time + <long>(ai_config.frame_recognition_seconds * 1000):
-            return True
+            return <bint>True
if len(annotation.detections) > len(self._previous_annotation.detections):
-            return True
+            return <bint>True
cdef:
Detection current_det, prev_det
@@ -502,8 +503,8 @@ cdef class Inference:
Detection closest_det
for current_det in annotation.detections:
-            min_distance_sq = 1e18
-            closest_det = None
+            min_distance_sq = <double>1e18
+            closest_det = <Detection>None
for prev_det in self._previous_annotation.detections:
dx = current_det.x - prev_det.x
@@ -517,9 +518,9 @@ cdef class Inference:
dist_px = ai_config.tracking_distance_confidence * self.model_width
dist_px_sq = dist_px * dist_px
if min_distance_sq > dist_px_sq:
-                return True
+                return <bint>True
if current_det.confidence >= closest_det.confidence + ai_config.tracking_probability_increase:
-                return True
+                return <bint>True
-        return False
+        return <bint>False