"""Entry point: run video detection with a selected inference engine.

Feeds 'ForAI_test.mp4' through an Inference pipeline backed by a
TensorRT engine. Alternative engine configurations (kept below as
comments) record the benchmark results that motivated the current choice.
"""

from inference import Inference
from onnx_engine import OnnxEngine
from tensorrt_engine import TensorRTEngine

if __name__ == "__main__":
    # Benchmark notes — detection over the first 200 s of video:
    #
    # ONNX Runtime, batch 4: 81 s, 6.3 GB VRAM
    # Inference(OnnxEngine('azaion-2025-03-10.onnx', batch_size=4),
    #           confidence_threshold=0.5, iou_threshold=0.3).process('ForAI_test.mp4')
    #
    # TensorRT INT8, batch 16: 54 s, 3.7 GB VRAM
    # Inference(TensorRTEngine('azaion-2025-03-10_int8.engine', batch_size=16),
    #           confidence_threshold=0.5, iou_threshold=0.3).process('ForAI_test.mp4')
    #
    # TensorRT, batch 8 (timing not recorded):
    # Inference(TensorRTEngine('azaion-2025-03-10_batch8.engine', batch_size=8),
    #           confidence_threshold=0.5, iou_threshold=0.3).process('ForAI_test.mp4')

    # Current choice: FP16 TensorRT engine, batch 4.
    Inference(
        TensorRTEngine('azaion-2025-03-10-half_batch4.engine', batch_size=4),
        confidence_threshold=0.5,
        iou_threshold=0.3,
    ).process('ForAI_test.mp4')