Mirror of https://github.com/azaion/detections.git, synced 2026-04-22 16:06:31 +00:00
[AZ-180] Enhance setup and improve inference logging

- Added a new Cython extension for the engine factory to the setup configuration.
- Updated the inference module to include additional logging for video batch processing and annotation callbacks.
- Refactored test cases to standardize the detection endpoint responses and include channel IDs in headers for better event handling.
This commit is contained in:
@@ -268,14 +268,24 @@ cdef class Inference:
|
||||
batch_count += 1
|
||||
tf = total_frames if total_frames > 0 else max(frame_count, 1)
|
||||
constants_inf.log(<str>f'Video batch {batch_count}: frame {frame_count}/{tf} ({frame_count*100//tf}%)')
|
||||
last_ts = batch_timestamps[len(batch_timestamps) - 1] if batch_timestamps else 0
|
||||
self._process_video_batch(ai_config, batch_frames, batch_timestamps, original_media_name, frame_count, tf, model_w)
|
||||
if self._annotation_callback is not None:
|
||||
pann = Annotation(original_media_name, original_media_name, last_ts, [])
|
||||
cb = self._annotation_callback
|
||||
cb(pann, int(frame_count * 100 / tf))
|
||||
batch_frames = []
|
||||
batch_timestamps = []
|
||||
if batch_frames:
|
||||
batch_count += 1
|
||||
tf = total_frames if total_frames > 0 else max(frame_count, 1)
|
||||
constants_inf.log(<str>f'Video batch {batch_count} (flush): {len(batch_frames)} remaining frames')
|
||||
last_ts = batch_timestamps[len(batch_timestamps) - 1] if batch_timestamps else 0
|
||||
self._process_video_batch(ai_config, batch_frames, batch_timestamps, original_media_name, frame_count, tf, model_w)
|
||||
if self._annotation_callback is not None:
|
||||
pann = Annotation(original_media_name, original_media_name, last_ts, [])
|
||||
cb = self._annotation_callback
|
||||
cb(pann, 100)
|
||||
constants_inf.log(<str>f'Video done: {frame_count} frames read, {batch_count} batches processed')
|
||||
self.send_detection_status()
|
||||
|
||||
|
||||
Reference in New Issue
Block a user