Files
detections/e2e/tests/test_resource_limits.py
T
Oleksandr Bezdieniezhnykh 27f4aceb52 Refactor inference and AI configuration handling
- Updated the `Inference` class to replace the `get_onnx_engine_bytes` method with `download_model`, allowing for dynamic model loading based on a specified filename.
- Modified the `convert_and_upload_model` method to accept `source_bytes` instead of `onnx_engine_bytes`, enhancing flexibility in model conversion.
- Introduced a new property `engine_name` to the `Inference` class for better access to engine details.
- Adjusted the `AIRecognitionConfig` structure to include a new method pointer `from_dict`, improving configuration handling.
- Updated various test cases to reflect changes in model paths and timeout settings, ensuring consistency and reliability in testing.
2026-03-30 00:22:56 +03:00

112 lines
3.3 KiB
Python

import json
import re
import threading
import time
import uuid
from datetime import datetime
from pathlib import Path
import pytest
def _video_ai_body(video_path: str) -> dict:
return {
"probability_threshold": 0.25,
"tracking_intersection_threshold": 0.6,
"altitude": 400,
"focal_length": 24,
"sensor_width": 23.5,
"paths": [video_path],
"frame_period_recognition": 4,
"frame_recognition_seconds": 2,
}
@pytest.mark.skip(reason="Single video run — covered by test_ft_p09_sse_event_delivery")
@pytest.mark.slow
@pytest.mark.timeout(300)
def test_ft_n_08_nft_res_lim_02_sse_queue_bounded_best_effort(
    warm_engine,
    http_client,
    jwt_token,
    video_short_path,
    sse_client_factory,
):
    """SSE delivery stays best-effort under load: events tagged with our
    media id keep arriving until the terminal ``AIProcessed``/100% event.
    """
    media_id = f"rlim-sse-{uuid.uuid4().hex}"
    payload = _video_ai_body(video_short_path)
    auth_headers = {"Authorization": f"Bearer {jwt_token}"}

    received: list[dict] = []
    listener_errors: list[BaseException] = []
    finished = threading.Event()

    def _consume():
        # Collect SSE events for our media id until the terminal event is seen.
        try:
            with sse_client_factory() as stream:
                time.sleep(0.3)  # let the subscription settle before work starts
                for evt in stream.events():
                    raw = evt.data
                    if not raw or not str(raw).strip():
                        continue
                    parsed = json.loads(raw)
                    if parsed.get("mediaId") != media_id:
                        continue
                    received.append(parsed)
                    terminal = (
                        parsed.get("mediaStatus") == "AIProcessed"
                        and parsed.get("mediaPercent") == 100
                    )
                    if terminal:
                        break
        except BaseException as exc:
            # Surface any listener failure to the main thread's assertions.
            listener_errors.append(exc)
        finally:
            finished.set()

    listener = threading.Thread(target=_consume, daemon=True)
    listener.start()
    time.sleep(0.5)  # give the listener a head start before triggering the run

    response = http_client.post(f"/detect/{media_id}", json=payload, headers=auth_headers)
    assert response.status_code == 200

    # 290s < the 300s pytest timeout, so a hang fails here with context.
    assert finished.wait(timeout=290)
    listener.join(timeout=5)
    assert not listener_errors, listener_errors
    assert received
    assert received[-1].get("mediaStatus") == "AIProcessed"
@pytest.mark.slow
@pytest.mark.timeout(120)
def test_nft_res_lim_03_max_detections_per_frame(
    warm_engine, http_client, image_dense
):
    """A densely populated image must yield at most 300 detections."""
    response = http_client.post(
        "/detect",
        files={"file": ("img.jpg", image_dense, "image/jpeg")},
        timeout=120,
    )
    assert response.status_code == 200
    detections = response.json()
    assert isinstance(detections, list)
    # Per-frame detection cap — the engine is expected to truncate beyond 300.
    assert len(detections) <= 300
@pytest.mark.slow
def test_nft_res_lim_04_log_file_rotation(warm_engine, http_client, image_small):
    """After at least one request, a dated inference log file should exist."""
    # Trigger some activity so today's log file gets written to.
    http_client.post(
        "/detect",
        files={"file": ("img.jpg", image_small, "image/jpeg")},
        timeout=60,
    )
    probe_dirs = (
        Path(__file__).resolve().parent.parent / "logs",
        Path("/app/Logs"),
    )
    log_dir = None
    for candidate in probe_dirs:
        if candidate.is_dir():
            log_dir = candidate
            break
    if log_dir is None:
        pytest.skip("Log directory not accessible from e2e-runner container")
    stamp = datetime.now().strftime("%Y%m%d")
    wanted = f"log_inference_{stamp}.txt"
    file_names = {entry.name for entry in log_dir.iterdir() if entry.is_file()}
    if wanted not in file_names:
        # Fall back to any dated inference log (e.g. clock skew across containers).
        dated = re.compile(r"^log_inference_\d{8}\.txt$")
        assert any(dated.match(name) for name in file_names), file_names