mirror of
https://github.com/azaion/detections.git
synced 2026-04-22 10:36:32 +00:00
[AZ-178] Implement streaming video detection endpoint
- Added `/detect/video` endpoint for true streaming video detection, allowing inference to start as upload bytes arrive.
- Introduced `run_detect_video_stream` method in the inference module to handle video processing from a file-like object.
- Updated media hashing to include a new function for computing hashes directly from files with minimal I/O.
- Enhanced documentation to reflect changes in video processing and API behavior.

Made-with: Cursor
This commit is contained in:
+132
@@ -467,6 +467,138 @@ async def detect_image(
|
||||
pass
|
||||
|
||||
|
||||
@app.post("/detect/video")
async def detect_video_upload(request: Request):
    """Accept a streaming video upload and run detection while bytes arrive.

    The raw request body is spooled into a ``StreamingBuffer`` on disk while a
    worker thread runs ``run_detect_video_stream`` against the same buffer, so
    inference starts before the upload finishes.  Progress and completion are
    fanned out to all subscriber queues as ``DetectionEvent``s.

    Headers:
        x-filename:       original filename (default ``upload.mp4``); must have
                          a video extension.
        x-config:         optional JSON-encoded AI config dict.
        authorization:    optional ``Bearer`` token; with it (and a decodable
                          user id) the media record is persisted upstream.
        x-refresh-token:  optional refresh token paired with the access token.

    Returns:
        ``{"status": "started", "mediaId": <content hash>}`` once the upload is
        fully received; post-processing continues in a background task.

    Raises:
        HTTPException(400): non-video extension or malformed ``x-config`` JSON.
    """
    from media_hash import compute_media_content_hash_from_file
    from inference import ai_config_from_dict
    from streaming_buffer import StreamingBuffer

    filename = request.headers.get("x-filename", "upload.mp4")
    config_json = request.headers.get("x-config", "")
    ext = _normalize_upload_ext(filename)
    if ext not in _VIDEO_EXTENSIONS:
        raise HTTPException(status_code=400, detail="Expected a video file extension")

    # Fix: malformed client JSON previously escaped as an unhandled 500;
    # report it as the client error it is.
    try:
        config_dict = json.loads(config_json) if config_json else {}
    except json.JSONDecodeError as exc:
        raise HTTPException(status_code=400, detail="Invalid x-config JSON") from exc
    ai_cfg = ai_config_from_dict(config_dict)

    auth_header = request.headers.get("authorization", "")
    access_token = auth_header.removeprefix("Bearer ").strip() if auth_header else ""
    refresh_token = request.headers.get("x-refresh-token", "")
    token_mgr = TokenManager(access_token, refresh_token) if access_token else None
    user_id = TokenManager.decode_user_id(access_token) if access_token else None

    videos_dir = os.environ.get(
        "VIDEOS_DIR", os.path.join(os.getcwd(), "data", "videos")
    )
    os.makedirs(videos_dir, exist_ok=True)
    # content-length (when present) lets the buffer pre-size / report progress.
    content_length = request.headers.get("content-length")
    total_size = int(content_length) if content_length else None
    buffer = StreamingBuffer(videos_dir, total_size=total_size)
    media_name = Path(filename).stem.replace(" ", "")
    # Fix: asyncio.get_event_loop() inside a coroutine is deprecated (3.10+);
    # we are guaranteed to be on a running loop here.
    loop = asyncio.get_running_loop()
    inf = get_inference()

    def _enqueue(event):
        # Fan the event out to every subscriber queue; drop on overflow so a
        # slow consumer cannot stall detection.
        for q in _event_queues:
            try:
                q.put_nowait(event)
            except asyncio.QueueFull:
                pass

    # The content hash is unknown until the upload completes, so progress
    # events use a temporary id derived from the buffer's on-disk name.
    placeholder_id = f"tmp_{os.path.basename(buffer.path)}"

    def on_annotation(annotation, percent):
        # Called from the inference worker thread -> marshal back to the loop.
        dtos = [detection_to_dto(d) for d in annotation.detections]
        event = DetectionEvent(
            annotations=dtos,
            mediaId=placeholder_id,
            mediaStatus="AIProcessing",
            mediaPercent=percent,
        )
        loop.call_soon_threadsafe(_enqueue, event)

    def on_status(media_name_cb, count):
        # Terminal status from the worker thread; arguments are unused because
        # the event is keyed on the placeholder id.
        event = DetectionEvent(
            annotations=[],
            mediaId=placeholder_id,
            mediaStatus="AIProcessed",
            mediaPercent=100,
        )
        loop.call_soon_threadsafe(_enqueue, event)

    def run_inference():
        inf.run_detect_video_stream(buffer, ai_cfg, media_name, on_annotation, on_status)

    # Start inference concurrently with the upload on the shared executor.
    inference_future = loop.run_in_executor(executor, run_inference)

    try:
        async for chunk in request.stream():
            # buffer.append may block on disk I/O; run it off-loop to keep the
            # event loop responsive while uploading.
            await loop.run_in_executor(None, buffer.append, chunk)
    except Exception:
        buffer.close_writer()
        buffer.close()
        raise
    # Signal EOF to the reader side so inference can finish the tail.
    buffer.close_writer()

    content_hash = compute_media_content_hash_from_file(buffer.path)
    if not ext.startswith("."):
        ext = "." + ext
    storage_path = os.path.abspath(os.path.join(videos_dir, f"{content_hash}{ext}"))

    if token_mgr and user_id:
        # Authenticated upload: keep the file under its content-hash name and
        # register it with the media service.
        # NOTE(review): the rename happens while the worker may still be
        # reading buffer.path — relies on POSIX open-fd semantics; confirm.
        os.rename(buffer.path, storage_path)
        payload = {
            "id": content_hash,
            "name": Path(filename).name,
            "path": storage_path,
            "mediaType": "Video",
            "mediaStatus": _MEDIA_STATUS_NEW,
            "userId": user_id,
        }
        bearer = token_mgr.get_valid_token()
        _post_media_record(payload, bearer)
        _put_media_status(content_hash, _MEDIA_STATUS_AI_PROCESSING, bearer)

    async def _wait_inference():
        # Awaits the worker, publishes the final status, and cleans up.
        try:
            await inference_future
            if token_mgr and user_id:
                _put_media_status(
                    content_hash, _MEDIA_STATUS_AI_PROCESSED,
                    token_mgr.get_valid_token(),
                )
            done_event = DetectionEvent(
                annotations=[],
                mediaId=content_hash,
                mediaStatus="AIProcessed",
                mediaPercent=100,
            )
            _enqueue(done_event)
        except Exception:
            if token_mgr and user_id:
                _put_media_status(
                    content_hash, _MEDIA_STATUS_ERROR,
                    token_mgr.get_valid_token(),
                )
            err_event = DetectionEvent(
                annotations=[], mediaId=content_hash,
                mediaStatus="Error", mediaPercent=0,
            )
            _enqueue(err_event)
        finally:
            _active_detections.pop(content_hash, None)
            buffer.close()
            # Anonymous uploads are never persisted: remove the temp file.
            if not (token_mgr and user_id) and os.path.isfile(buffer.path):
                try:
                    os.unlink(buffer.path)
                except OSError:
                    pass

    _active_detections[content_hash] = asyncio.create_task(_wait_inference())
    return {"status": "started", "mediaId": content_hash}
|
||||
|
||||
|
||||
def _post_annotation_to_service(token_mgr: TokenManager, media_id: str,
|
||||
annotation, dtos: list[DetectionDto]):
|
||||
try:
|
||||
|
||||
Reference in New Issue
Block a user