## Component Relationship Diagram

```mermaid
graph TD
    subgraph "04 - API Layer"
        API["main.py<br/>(FastAPI endpoints, DTOs, SSE, TokenManager)"]
    end

    subgraph "03 - Inference Pipeline"
        INF["inference<br/>(orchestrator, preprocessing, postprocessing)"]
        LDR["loader_http_client<br/>(model download/upload)"]
    end

    subgraph "02 - Inference Engines"
        IE["inference_engine<br/>(abstract base)"]
        ONNX["onnx_engine<br/>(ONNX Runtime)"]
        TRT["tensorrt_engine<br/>(TensorRT + conversion)"]
    end

    subgraph "01 - Domain"
        CONST["constants_inf<br/>(constants, logging, class registry)"]
        ANNOT["annotation<br/>(Detection, Annotation)"]
        AICFG["ai_config<br/>(AIRecognitionConfig)"]
        STATUS["ai_availability_status<br/>(AIAvailabilityStatus)"]
    end

    subgraph "External Services"
        LOADER["Loader Service<br/>(http://loader:8080)"]
        ANNSVC["Annotations Service<br/>(http://annotations:8080)"]
    end

    API --> INF
    API --> CONST
    API --> LDR
    API --> ANNSVC

    INF --> ONNX
    INF --> TRT
    INF --> LDR
    INF --> CONST
    INF --> ANNOT
    INF --> AICFG
    INF --> STATUS

    ONNX --> IE
    ONNX --> CONST
    TRT --> IE
    TRT --> CONST

    STATUS --> CONST
    ANNOT --> CONST

    LDR --> LOADER
```
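
The point of the 02 layer is the `inference_engine` abstraction: the orchestrator in `inference` can target ONNX Runtime or TensorRT without changing its own code. The diagram doesn't spell out the interface, so the following is a minimal sketch of that pluggable-backend pattern; the class and method names (`InferenceEngine`, `load_model`, `infer`) are illustrative assumptions, not the module's actual API.

```python
# Sketch of the pluggable-engine pattern implied by layer 02.
# Names here are assumptions for illustration, not the real interface.
from abc import ABC, abstractmethod

import numpy as np


class InferenceEngine(ABC):
    """Abstract base: each backend (ONNX Runtime, TensorRT) implements this."""

    @abstractmethod
    def load_model(self, model_path: str) -> None:
        """Load a serialized model from disk."""

    @abstractmethod
    def infer(self, batch: np.ndarray) -> np.ndarray:
        """Run a forward pass on a preprocessed batch."""


class OnnxEngine(InferenceEngine):
    """ONNX Runtime backend, in the spirit of the onnx_engine module."""

    def load_model(self, model_path: str) -> None:
        import onnxruntime as ort  # deferred so TensorRT-only hosts skip it
        self.session = ort.InferenceSession(model_path)
        self.input_name = self.session.get_inputs()[0].name

    def infer(self, batch: np.ndarray) -> np.ndarray:
        return self.session.run(None, {self.input_name: batch})[0]
```

With this shape, `inference` holds a single `InferenceEngine` reference and engine selection becomes a configuration concern rather than a code change.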

## Component Summary

| # | Component | Modules | Purpose |
|---|-----------|---------|---------|
| 01 | Domain | `constants_inf`, `ai_config`, `ai_availability_status`, `annotation` | Shared data models, enums, constants, logging |
| 02 | Inference Engines | `inference_engine`, `onnx_engine`, `tensorrt_engine` | Pluggable ML inference backends |
| 03 | Inference Pipeline | `inference`, `loader_http_client` | Orchestration: engine lifecycle, preprocessing, postprocessing, media processing |
| 04 | API | `main` | HTTP API, SSE streaming, auth token management |
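
Layer 04's SSE streaming is typically implemented in FastAPI as a `StreamingResponse` wrapping an async generator that yields `data: ...` frames. The sketch below shows that mechanism only; the route path and event payload are assumptions, not the service's actual contract.

```python
# Minimal SSE sketch in the style of the FastAPI endpoints in main.
# Route path and payload are illustrative assumptions.
import asyncio
import json

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()


@app.get("/inference/events")
async def inference_events():
    async def event_stream():
        # In the real service these events would come from the inference
        # pipeline; here we emit a placeholder heartbeat every second.
        while True:
            payload = json.dumps({"status": "processing"})
            yield f"data: {payload}\n\n"
            await asyncio.sleep(1)

    return StreamingResponse(event_stream(), media_type="text/event-stream")
```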