Refactor inference engine and task management: Remove obsolete inference engine and ONNX engine files, update inference processing to utilize batch handling, and enhance task management structure in documentation. Adjust paths for task specifications to align with new directory organization.

This commit is contained in:
Oleksandr Bezdieniezhnykh
2026-03-28 01:04:28 +02:00
parent 1e4ef299f9
commit 5be53739cd
60 changed files with 111875 additions and 208 deletions
+49 -16
View File
@@ -1,17 +1,16 @@
import csv
import json
import os
import threading
import time
import uuid
import pytest
def _video_load_url(mock_loader_url: str, video_media_path: str) -> str:
name = video_media_path.rstrip("/").split("/")[-1]
return f"{mock_loader_url.rstrip('/')}/load/{name}"
RESULTS_DIR = os.environ.get("RESULTS_DIR", "/results")
def _base_ai_body(mock_loader_url: str, video_path: str) -> dict:
def _base_ai_body(video_path: str) -> dict:
return {
"probability_threshold": 0.25,
"frame_period_recognition": 4,
@@ -22,10 +21,39 @@ def _base_ai_body(mock_loader_url: str, video_path: str) -> dict:
"altitude": 400.0,
"focal_length": 24.0,
"sensor_width": 23.5,
"paths": [_video_load_url(mock_loader_url, video_path)],
"paths": [video_path],
}
def _save_events_csv(video_path: str, events: list[dict]):
    """Flatten SSE progress events into per-detection CSV rows.

    Each event contributes its media-level fields; if it carries
    ``annotations``, one row is written per detection (merged over the
    media fields), otherwise a single media-only row is kept.  The file
    is written to ``RESULTS_DIR/<video stem>_detections.csv``; nothing
    is written when there are no rows.
    """
    stem = os.path.splitext(os.path.basename(video_path))[0]
    out_path = os.path.join(RESULTS_DIR, f"{stem}_detections.csv")

    rows: list[dict] = []
    for event in events:
        common = {
            "mediaId": event.get("mediaId", ""),
            "mediaStatus": event.get("mediaStatus", ""),
            "mediaPercent": event.get("mediaPercent", ""),
        }
        detections = event.get("annotations") or []
        if not detections:
            rows.append(common)
            continue
        rows.extend({**common, **det} for det in detections)

    if not rows:
        return

    # Union of keys across all rows, preserving first-seen order
    # (first row's keys lead, later-only keys appended in order).
    fieldnames = list(dict.fromkeys(key for row in rows for key in row))

    with open(out_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
        writer.writeheader()
        writer.writerows(rows)
def _run_async_video_sse(
http_client,
jwt_token,
@@ -34,9 +62,11 @@ def _run_async_video_sse(
body: dict,
*,
timed: bool = False,
wait_s: float = 120.0,
wait_s: float = 900.0,
):
video_path = (body.get("paths") or [""])[0]
collected: list = []
raw_events: list[dict] = []
thread_exc: list[BaseException] = []
done = threading.Event()
@@ -50,6 +80,7 @@ def _run_async_video_sse(
data = json.loads(event.data)
if data.get("mediaId") != media_id:
continue
raw_events.append(data)
if timed:
collected.append((time.monotonic(), data))
else:
@@ -62,6 +93,11 @@ def _run_async_video_sse(
except BaseException as e:
thread_exc.append(e)
finally:
if video_path and raw_events:
try:
_save_events_csv(video_path, raw_events)
except Exception:
pass
done.set()
th = threading.Thread(target=_listen, daemon=True)
@@ -96,17 +132,16 @@ def _assert_detection_dto(d: dict) -> None:
@pytest.mark.slow
@pytest.mark.timeout(120)
@pytest.mark.timeout(900)
def test_ft_p_10_frame_sampling_ac1(
warm_engine,
http_client,
jwt_token,
mock_loader_url,
video_short_path,
sse_client_factory,
):
media_id = f"video-{uuid.uuid4().hex}"
body = _base_ai_body(mock_loader_url, video_short_path)
body = _base_ai_body(video_short_path)
body["frame_period_recognition"] = 4
collected = _run_async_video_sse(
http_client,
@@ -123,17 +158,16 @@ def test_ft_p_10_frame_sampling_ac1(
@pytest.mark.slow
@pytest.mark.timeout(120)
@pytest.mark.timeout(900)
def test_ft_p_11_annotation_interval_ac2(
warm_engine,
http_client,
jwt_token,
mock_loader_url,
video_short_path,
sse_client_factory,
):
media_id = f"video-{uuid.uuid4().hex}"
body = _base_ai_body(mock_loader_url, video_short_path)
body = _base_ai_body(video_short_path)
body["frame_recognition_seconds"] = 2
collected = _run_async_video_sse(
http_client,
@@ -158,17 +192,16 @@ def test_ft_p_11_annotation_interval_ac2(
@pytest.mark.slow
@pytest.mark.timeout(120)
@pytest.mark.timeout(900)
def test_ft_p_12_movement_tracking_ac3(
warm_engine,
http_client,
jwt_token,
mock_loader_url,
video_short_path,
sse_client_factory,
):
media_id = f"video-{uuid.uuid4().hex}"
body = _base_ai_body(mock_loader_url, video_short_path)
body = _base_ai_body(video_short_path)
body["tracking_distance_confidence"] = 0.1
body["tracking_probability_increase"] = 0.1
collected = _run_async_video_sse(