mirror of
https://github.com/azaion/detections.git
synced 2026-04-22 07:06:32 +00:00
[AZ-180] Add Jetson Orin Nano support with INT8 TensorRT engine
- Dockerfile.jetson: JetPack 6.x L4T base image (aarch64), TensorRT and PyCUDA from apt - requirements-jetson.txt: derived from requirements.txt, no pip tensorrt/pycuda - docker-compose.jetson.yml: runtime: nvidia for NVIDIA Container Runtime - tensorrt_engine.pyx: convert_from_source accepts optional calib_cache_path; INT8 used when cache present, FP16 fallback; get_engine_filename encodes precision suffix to avoid engine cache confusion - inference.pyx: init_ai tries INT8 engine then FP16 on lookup; downloads calibration cache before conversion thread; passes cache path through to convert_from_source - constants_inf: add INT8_CALIB_CACHE_FILE constant - Unit tests for AC-3 (INT8 flag set when cache provided) and AC-4 (FP16 when no cache) Made-with: Cursor
This commit is contained in:
@@ -0,0 +1,20 @@
---
# docker-compose.jetson.yml — run the detections service on Jetson Orin Nano
# (JetPack 6.x, aarch64). Requires the NVIDIA Container Runtime to be
# installed and registered with Docker (`runtime: nvidia` below).
name: detections-jetson

services:
  detections:
    build:
      context: .
      # Jetson-specific image: L4T base with TensorRT/PyCUDA from apt.
      dockerfile: Dockerfile.jetson
    ports:
      - "8080:8080"
    # Select the NVIDIA Container Runtime so the container sees the GPU.
    runtime: nvidia
    environment:
      # Quoted so an empty/unset expansion stays an empty string rather
      # than being parsed as YAML null.
      LOADER_URL: "${LOADER_URL}"
      ANNOTATIONS_URL: "${ANNOTATIONS_URL}"
    env_file: .env
    volumes:
      - detections-logs:/app/Logs
    # Raise shared memory above Docker's 64m default — NOTE(review):
    # presumably needed by the CUDA/TensorRT inference workload; confirm.
    shm_size: 512m

volumes:
  # Named volume with the default local driver (empty mapping is the
  # conventional Compose idiom for "no extra options").
  detections-logs:
Reference in New Issue
Block a user