detections/tests/test_az178_realvideo_streaming.py
Oleksandr Bezdieniezhnykh 07c2afb62e [AZ-178] Add real-video streaming test, update e2e tests, mark task done
- Add tests/test_az178_realvideo_streaming.py: integration test that validates
  frame decoding begins while upload is still in progress using a real video fixture
- Add conftest.py: pytest plugin for per-test duration reporting
- Update e2e tests (async_sse, performance, security, streaming_video_upload, video)
  and run-tests.sh for the updated test suite
- Move AZ-178 task to done/; add data/ to .gitignore (StreamingBuffer temp files)
- Update autopilot state to step 12 (Security Audit) for new feature cycle

Made-with: Cursor
2026-04-01 05:02:25 +03:00
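
Note: the test depends on StreamingBuffer from streaming_buffer.py, which this commit does not include. Its writer-side surface, inferred purely from the test's usage, is a constructor taking total_size, append(bytes), a close_writer() that tolerates repeated calls, close(), and a path attribute naming a backing temp file (the .gitignore change suggests these live under data/). A hypothetical stand-in with just that surface, for orientation only:

# Hypothetical stand-in for streaming_buffer.StreamingBuffer, inferred from the
# test's usage only; the real class (not shown here) must additionally expose a
# reader side that blocks until data arrives or the writer closes.
import os
import tempfile
import threading


class StreamingBuffer:
    def __init__(self, total_size):
        self.total_size = total_size
        # The commit's .gitignore note says temp files live under data/; the
        # default temp dir is used here to keep the sketch self-contained.
        fd, self.path = tempfile.mkstemp(suffix=".stream")
        self._file = os.fdopen(fd, "wb")
        self._lock = threading.Lock()
        self._writer_closed = False

    def append(self, chunk):
        with self._lock:
            self._file.write(chunk)
            self._file.flush()

    def close_writer(self):
        # Called from both the writer thread and test teardown, so it must
        # tolerate being invoked twice.
        with self._lock:
            if not self._writer_closed:
                self._writer_closed = True
                self._file.close()

    def close(self):
        self.close_writer()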


"""
AZ-178: Streaming video detection with real AI inference.
Uses video_1_faststart.mp4. Stops after 5 seconds.
Requires services (run via run-tests.sh) for model download.
Run: sh run-tests.sh -k test_frames_decoded -s
"""
import os
import threading
import time
from pathlib import Path

import pytest

FIXTURES_DIR = Path(__file__).resolve().parent.parent / "e2e" / "fixtures"
FASTSTART_PATH = FIXTURES_DIR / "video_1_faststart.mp4"


@pytest.fixture(scope="module")
def faststart_video():
    """Return the faststart fixture path, remuxing video_1.mp4 with ffmpeg if needed."""
    if FASTSTART_PATH.is_file():
        return str(FASTSTART_PATH)
    source = FIXTURES_DIR / "video_1.mp4"
    if not source.is_file():
        pytest.skip(f"missing source fixture {source}")
    import shutil
    import subprocess
    ffmpeg = shutil.which("ffmpeg")
    if not ffmpeg:
        pytest.skip("ffmpeg not found")
    # "+faststart" moves the moov atom to the front of the file so a decoder
    # can start reading frames before the full file has arrived.
    subprocess.run(
        [ffmpeg, "-y", "-i", str(source), "-c", "copy", "-movflags", "+faststart", str(FASTSTART_PATH)],
        capture_output=True, check=True,
    )
    return str(FASTSTART_PATH)
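

# Strategy: one thread streams the video into a StreamingBuffer in 64 KiB
# chunks while a second thread runs detection against the same buffer. The
# test asserts that at least one annotation arrived within the 5-second
# window and reports whether the first one landed before the upload finished.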
def test_frames_decoded_while_upload_in_progress(faststart_video):
    from streaming_buffer import StreamingBuffer

    loader_url = os.environ.get("LOADER_URL") or os.environ.get("MOCK_LOADER_URL")
    if not loader_url:
        pytest.skip("LOADER_URL/MOCK_LOADER_URL not set — run via run-tests.sh for real detections")
    from inference import Inference, ai_config_from_dict
    from loader_http_client import LoaderHttpClient

    client = LoaderHttpClient(loader_url)
    inf = Inference(client)
    if not inf.is_engine_ready:
        pytest.skip("AI engine not available (model download failed)")

    # Arrange
    ai_cfg = ai_config_from_dict({})
    file_size = os.path.getsize(faststart_video)
    chunk_size = 64 * 1024
    buf = StreamingBuffer(total_size=file_size)
    # Single-element lists serve as mutable cells shared with the closures below.
    bytes_written = [0]
    stop_flag = threading.Event()
    writer_start = [0.0]
    detections_log = []
    first_det_time = []
    first_det_pct = []  # % of the file uploaded when the first detection arrived
    inf_error = []

    from constants_inf import get_annotation_name

    last_det_time = [0.0]

    def on_annotation(annotation, percent):
        now = time.monotonic()
        written_mb = bytes_written[0] / (1024 * 1024)
        pct_file = bytes_written[0] * 100 / file_size
        if not first_det_time:
            first_det_time.append(now)
            # Capture upload progress at the moment of the first detection.
            first_det_pct.append(pct_file)
        elapsed = now - writer_start[0]
        delta = now - last_det_time[0] if last_det_time[0] else elapsed
        last_det_time[0] = now
        det_strs = [
            f"{get_annotation_name(d.cls)}:{d.confidence*100:.0f}% @({d.x:.3f},{d.y:.3f} {d.w:.3f}x{d.h:.3f})"
            for d in annotation.detections
        ]
        detections_log.append((now, annotation, percent))
        print(f" DET | {elapsed:7.2f}s | +{delta:.3f}s | {written_mb:8.1f} MB | {pct_file:5.1f}% file | "
              f"{percent:3d}% video | {len(annotation.detections)} dets | {det_strs}")

    def on_status(media_name, count):
        print(f" STATUS | {media_name}: {count} total detections")
    def writer():
        writer_start[0] = time.monotonic()
        with open(faststart_video, "rb") as f:
            while not stop_flag.is_set():
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                buf.append(chunk)
                bytes_written[0] += len(chunk)
                time.sleep(0.001)  # pace the upload (~64 KiB per ms) so it takes measurable time
        buf.close_writer()

    def run_inference():
        try:
            inf.run_detect_video_stream(buf, ai_cfg, "streaming_test", on_annotation, on_status)
        except Exception as e:
            inf_error.append(e)
print(f"\n Video: {file_size/(1024*1024):.1f} MB (faststart)")
print(f" {'':>6s} {'Time':>8s} {'Delta':>8s} {'Written':>10s} {'% File':>7s} {'% Vid':>5s} {'Dets':>4s} Labels")
print(f" {'-'*95}")
# Act
wt = threading.Thread(target=writer, daemon=True)
wt.start()
inf_thread = threading.Thread(target=run_inference, daemon=True)
inf_thread.start()
inf_thread.join(timeout=5.0)
inf.stop()
stop_flag.set()
buf.close_writer()
wt.join(timeout=5)
inf_thread.join(timeout=5)
try:
buf.close()
os.unlink(buf.path)
except Exception:
pass

    # Assert
    written_mb = bytes_written[0] / (1024 * 1024)
    print(f"\n {'='*60}")
    print(" RESULTS")
    print(f" {'='*60}")
    print(f" Detections received: {len(detections_log)}")
    print(f" File uploaded: {written_mb:.1f} / {file_size/(1024*1024):.1f} MB")
    if first_det_time:
        ttfd = first_det_time[0] - writer_start[0]
        pct_at_first = first_det_pct[0]
        print(f" Time to first detection: {ttfd:.3f}s")
        if pct_at_first < 100:
            print(" >>> STREAMING CONFIRMED: detections arrived while upload in progress")
        else:
            print(" >>> Detections arrived after full upload")
    else:
        print(" Time to first detection: (none — no detections in 5s)")
    if inf_error:
        print(f" Inference error: {inf_error[0]}")
    print(f" {'='*60}\n")

    assert not inf_error, f"Inference error: {inf_error}"
    assert len(detections_log) > 0, "no detections received in 5s"
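
Reading the output: each DET row logs elapsed time since the writer started, the delta from the previous detection, bytes uploaded so far, the share of the file written, the decoder's reported video progress, and the detection labels. The summary prints STREAMING CONFIRMED when the first detection arrived before the upload completed; if the first detection only arrived after the full upload, the summary notes that instead, though the test still passes as long as some detections arrived within the 5-second window.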