[AZ-178] Implement streaming video detection endpoint
- Added `/detect/video` endpoint for true streaming video detection, allowing inference to start as upload bytes arrive.
- Introduced `run_detect_video_stream` method in the inference module to handle video processing from a file-like object.
- Updated media hashing with a new function for computing hashes directly from files with minimal I/O.
- Enhanced documentation to reflect changes in video processing and API behavior.

Made-with: Cursor
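For orientation, here is a minimal sketch of how a client might drive the new endpoint. Only the `/detect/video` path and the start-inference-during-upload behavior come from this commit; the host, port, headers, and response handling are illustrative assumptions.

# Hypothetical client for the new streaming endpoint. Only the
# /detect/video path is taken from this commit; host, port, headers,
# and the JSON response shape are assumptions.
import requests

def stream_file(path, chunk_size=64 * 1024):
    # Yielding chunks makes requests send a chunked request body, so the
    # server can start inference before the upload finishes.
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            yield chunk

resp = requests.post(
    "http://localhost:8080/detect/video",   # assumed base URL
    data=stream_file("video_1_faststart.mp4"),
    headers={"Content-Type": "video/mp4"},
    timeout=600,
)
resp.raise_for_status()
print(resp.json())  # assumed: endpoint returns JSON detections

The media-hashing change can be pictured with the standard chunked hashlib pattern; the helper name below is hypothetical, not the function added by the commit.

import hashlib

def hash_file_sha256(path, chunk_size=1024 * 1024):
    # Hypothetical illustration: fixed-size reads keep memory use constant
    # for large videos. Python 3.11+ offers hashlib.file_digest for the
    # same job with fewer copies.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()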
@@ -0,0 +1,153 @@
"""
AZ-178: Streaming video detection with real AI inference.

Uses video_1_faststart.mp4. Stops after 10 seconds.

Requires services (run via run-tests.sh) for model download.

Run: sh run-tests.sh -k test_frames_decoded
"""
import os
import threading
import time
from pathlib import Path

import pytest

FIXTURES_DIR = Path(__file__).resolve().parent.parent / "e2e" / "fixtures"
FASTSTART_PATH = FIXTURES_DIR / "video_1_faststart.mp4"


@pytest.fixture(scope="module")
def faststart_video():
    """Return the faststart fixture, remuxing video_1.mp4 with ffmpeg if needed."""
    if FASTSTART_PATH.is_file():
        return str(FASTSTART_PATH)
    source = FIXTURES_DIR / "video_1.mp4"
    if not source.is_file():
        pytest.skip(f"missing source fixture {source}")
    import shutil
    import subprocess
    ffmpeg = shutil.which("ffmpeg")
    if not ffmpeg:
        pytest.skip("ffmpeg not found")
    # Remux (no re-encode) with the moov atom up front so the file is
    # decodable while bytes are still arriving.
    subprocess.run(
        [ffmpeg, "-y", "-i", str(source), "-c", "copy", "-movflags", "+faststart", str(FASTSTART_PATH)],
        capture_output=True, check=True,
    )
    return str(FASTSTART_PATH)


def test_frames_decoded_while_upload_in_progress(faststart_video):
    from streaming_buffer import StreamingBuffer

    loader_url = os.environ.get("LOADER_URL") or os.environ.get("MOCK_LOADER_URL")
    if not loader_url:
        pytest.skip("LOADER_URL/MOCK_LOADER_URL not set — run via run-tests.sh for real detections")

    from inference import Inference, ai_config_from_dict
    from loader_http_client import LoaderHttpClient

    client = LoaderHttpClient(loader_url)
    inf = Inference(client)
    if not inf.is_engine_ready:
        pytest.skip("AI engine not available (model download failed)")

    # Arrange
    ai_cfg = ai_config_from_dict({})
    file_size = os.path.getsize(faststart_video)
    chunk_size = 64 * 1024
    buf = StreamingBuffer(total_size=file_size)

    # Single-element lists act as mutable cells shared with the threads below.
    bytes_written = [0]
    stop_flag = threading.Event()
    writer_start = [0.0]
    detections_log = []
    first_det_time = []
    inf_error = []

    from constants_inf import get_annotation_name

    def on_annotation(annotation, percent):
        # Called by the inference engine for each annotated frame.
        now = time.monotonic()
        if not first_det_time:
            first_det_time.append(now)
        written_mb = bytes_written[0] / (1024 * 1024)
        pct_file = bytes_written[0] * 100 / file_size
        elapsed = now - writer_start[0]
        det_strs = [
            f"{get_annotation_name(d.cls)}:{d.confidence*100:.0f}% @({d.x:.3f},{d.y:.3f} {d.w:.3f}x{d.h:.3f})"
            for d in annotation.detections
        ]
        detections_log.append((now, annotation, percent))
        print(f" DET | {elapsed:7.2f}s | {written_mb:8.1f} MB | {pct_file:5.1f}% file | "
              f"{percent:3d}% video | {len(annotation.detections)} dets | {det_strs}")

    def on_status(media_name, count):
        print(f" STATUS | {media_name}: {count} total detections")

    def writer():
        # Simulates a slow upload: feed the buffer in 64 KiB chunks with a
        # small sleep so inference overlaps with the "upload".
        writer_start[0] = time.monotonic()
        with open(faststart_video, "rb") as f:
            while not stop_flag.is_set():
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                buf.append(chunk)
                bytes_written[0] += len(chunk)
                time.sleep(0.001)
        buf.close_writer()

    def run_inference():
        try:
            inf.run_detect_video_stream(buf, ai_cfg, "streaming_test", on_annotation, on_status)
        except Exception as e:
            inf_error.append(e)

print(f"\n Video: {file_size/(1024*1024):.1f} MB (faststart)")
|
||||
print(f" {'':>6s} {'Time':>8s} {'Written':>10s} {'% File':>7s} {'% Vid':>5s} {'Dets':>4s} Labels")
|
||||
print(f" {'-'*80}")
|
||||
|
||||
# Act
|
||||
wt = threading.Thread(target=writer, daemon=True)
|
||||
wt.start()
|
||||
|
||||
inf_thread = threading.Thread(target=run_inference, daemon=True)
|
||||
inf_thread.start()
|
||||
|
||||
inf_thread.join(timeout=10.0)
|
||||
|
||||
inf.stop()
|
||||
stop_flag.set()
|
||||
buf.close_writer()
|
||||
wt.join(timeout=5)
|
||||
inf_thread.join(timeout=5)
|
||||
|
||||
try:
|
||||
buf.close()
|
||||
os.unlink(buf.path)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
    # Assert
    written_mb = bytes_written[0] / (1024 * 1024)
    print(f"\n {'='*60}")
    print(" RESULTS")
    print(f" {'='*60}")
    print(f" Detections received: {len(detections_log)}")
    print(f" File uploaded: {written_mb:.1f} / {file_size/(1024*1024):.1f} MB")

    if first_det_time:
        ttfd = first_det_time[0] - writer_start[0]
        pct_at_first = bytes_written[0] * 100 / file_size
        print(f" Time to first detection: {ttfd:.3f}s")
        if pct_at_first < 100:
            print(" >>> STREAMING CONFIRMED: detections arrived while upload in progress")
        else:
            print(" >>> Detections arrived after full upload")
    else:
        print(" Time to first detection: (none — no detections in 10s)")

    if inf_error:
        print(f" Inference error: {inf_error[0]}")
    print(f" {'='*60}\n")

    assert not inf_error, f"Inference error: {inf_error}"
    assert len(detections_log) > 0, "no detections received in 10s"
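The test above exercises a `StreamingBuffer` with `append`, `close_writer`, `close`, a `path` attribute, and a `total_size` argument; those calls appear in the diff, but the implementation does not. The following is a minimal file-backed sketch of what such a buffer might look like, assuming one writer thread and one reader, not the project's actual class.

# Minimal sketch of the StreamingBuffer interface the test exercises.
# Only append/close_writer/close/path/total_size are grounded in the
# diff above; everything else here is an illustrative assumption.
import os
import tempfile
import threading

class StreamingBuffer:
    """File-backed buffer: one writer appends, one reader consumes."""

    def __init__(self, total_size=None):
        self.total_size = total_size
        fd, self.path = tempfile.mkstemp(suffix=".mp4")
        self._wfile = os.fdopen(fd, "wb")
        self._rfile = open(self.path, "rb")
        self._cond = threading.Condition()
        self._written = 0
        self._writer_done = False

    def append(self, chunk):
        # Called from the upload/writer thread as bytes arrive.
        with self._cond:
            self._wfile.write(chunk)
            self._wfile.flush()
            self._written += len(chunk)
            self._cond.notify_all()

    def read(self, n=-1):
        # File-like read for the decoder: block until bytes are available
        # or the writer signals that no more will arrive.
        with self._cond:
            while self._rfile.tell() >= self._written and not self._writer_done:
                self._cond.wait()
            return self._rfile.read(n)

    def close_writer(self):
        # Safe to call more than once, as the test does.
        with self._cond:
            self._writer_done = True
            self._cond.notify_all()

    def close(self):
        self._wfile.close()
        self._rfile.close()

A design like this only lets decoding start early because the fixture remuxes the video with `+faststart`, placing the moov atom at the front of the file.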