Files
ai-training/tests/test_export.py
T
Oleksandr Bezdieniezhnykh 222f552a10 [AZ-171] Add TensorRT tests, AC coverage gate in implement skill, optimize test infrastructure
- Add TensorRT export tests with graceful skip when no GPU available
- Add AC test coverage verification step (Step 8) to implement skill
- Add test coverage gap analysis to new-task skill
- Move exported_models fixture to conftest.py as session-scoped (shared across modules)
- Reorder tests: e2e training runs first so images/labels are available for all tests
- Consolidate teardown into single session-level cleanup in conftest.py
- Fix infrastructure tests to count files dynamically instead of hardcoded 20

Made-with: Cursor
2026-03-28 21:32:28 +02:00

183 lines
5.9 KiB
Python

import sys
from pathlib import Path
import cv2
import numpy as np
import onnxruntime as ort
import pytest
import torch
from ultralytics import YOLO
import constants as c
import exports as exports_mod
# TensorRT availability probe: export/inference needs both an NVIDIA GPU
# (CUDA) and the `tensorrt` Python package; either missing disables the
# TensorRT test class via its skipif marker below.
_HAS_TENSORRT = torch.cuda.is_available()
try:
    import tensorrt  # noqa: F401 -- imported only to check availability
except ImportError:
    _HAS_TENSORRT = False

# Shared test paths: the test config YAML one level above this tests/
# directory, and the dataset images produced by the e2e training test.
_TESTS_DIR = Path(__file__).resolve().parent
_CONFIG_TEST = _TESTS_DIR.parent / "config.test.yaml"
_DATASET_IMAGES = _TESTS_DIR / "root" / "data" / "images"
class TestOnnxExport:
    """Tests for the exported ONNX model.

    Covers: the .onnx file exists and is non-empty, the batch axis is
    dynamic, and CPU inference via onnxruntime works at batch sizes 1 and 4.
    Relies on the session-scoped ``exported_models`` fixture (conftest.py).
    """

    @staticmethod
    def _first_image_blob(imgsz):
        """Load the first dataset image as a (1, 3, imgsz, imgsz) float blob.

        Skips the calling test when no images exist (the e2e training test
        is expected to have produced them earlier in the session). Extracted
        because the same arrange code was duplicated in both inference tests.
        """
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        return cv2.dnn.blobFromImage(
            cv2.imread(str(imgs[0])), 1.0 / 255.0, (imgsz, imgsz), (0, 0, 0), swapRB=True, crop=False,
        )

    def test_onnx_file_created(self, exported_models):
        # Assert
        p = Path(exported_models["onnx"])
        assert p.exists()
        assert p.stat().st_size > 0

    def test_onnx_batch_dimension_is_dynamic(self, exported_models):
        # Arrange
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        batch_dim = session.get_inputs()[0].shape[0]
        # Assert: onnxruntime reports a dynamic axis as a symbolic name (str)
        # or as -1, depending on how the exporter encoded it.
        assert isinstance(batch_dim, str) or batch_dim == -1

    def test_onnx_inference_batch_1(self, exported_models):
        # Arrange
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        meta = session.get_inputs()[0]
        blob = self._first_image_blob(exported_models["imgsz"])
        # Act
        out = session.run(None, {meta.name: blob})
        # Assert
        assert out[0].shape[0] == 1

    def test_onnx_inference_batch_multiple(self, exported_models):
        # Arrange
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        meta = session.get_inputs()[0]
        single = self._first_image_blob(exported_models["imgsz"])
        # Repeat the same image to exercise the dynamic batch axis at size 4.
        batch = np.concatenate([single] * 4, axis=0)
        # Act
        out = session.run(None, {meta.name: batch})
        # Assert
        assert out[0].shape[0] == 4
@pytest.mark.skipif(not _HAS_TENSORRT, reason="TensorRT requires NVIDIA GPU and tensorrt package")
class TestTensorrtExport:
    """Tests for TensorRT engine export and inference at several batch sizes.

    Skipped entirely when no NVIDIA GPU / tensorrt package is available.
    """

    @pytest.fixture(scope="class")
    def tensorrt_model(self, exported_models):
        """Export a TensorRT engine from the trained .pt model.

        Temporarily swaps the global config for the test config so the export
        writes into the test model directory, and restores it even when the
        export fails. Yields engine path info; deletes generated .engine
        files on teardown.
        """
        # Arrange
        model_dir = exported_models["model_dir"]
        pt_path = exported_models["pt_path"]
        old_config = c.config
        c.config = c.Config.from_yaml(str(_CONFIG_TEST), root=str(model_dir.parent))
        try:
            # Act
            exports_mod.export_tensorrt(pt_path)
        finally:
            # Restore the global config even if export raises, so the
            # mutated test config does not leak into later session tests.
            c.config = old_config
        engines = list(model_dir.glob("*.engine"))
        yield {
            "engine": str(engines[0]) if engines else None,
            "model_dir": model_dir,
            "imgsz": exported_models["imgsz"],
        }
        # Teardown: remove generated engines so reruns start clean.
        for e in model_dir.glob("*.engine"):
            e.unlink(missing_ok=True)

    def test_tensorrt_engine_created(self, tensorrt_model):
        # Assert
        assert tensorrt_model["engine"] is not None
        p = Path(tensorrt_model["engine"])
        assert p.exists()
        assert p.stat().st_size > 0

    def test_tensorrt_inference_batch_1(self, tensorrt_model):
        # Arrange
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        model = YOLO(tensorrt_model["engine"])
        # Act
        results = model.predict(source=str(imgs[0]), imgsz=tensorrt_model["imgsz"], verbose=False)
        # Assert
        assert len(results) == 1
        assert results[0].boxes is not None

    def test_tensorrt_inference_batch_multiple(self, tensorrt_model):
        # Arrange
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if len(imgs) < 4:
            pytest.skip("need at least 4 test images")
        model = YOLO(tensorrt_model["engine"])
        # Act: four distinct images in one call exercises batched inference.
        results = model.predict(source=[str(p) for p in imgs[:4]], imgsz=tensorrt_model["imgsz"], verbose=False)
        # Assert
        assert len(results) == 4

    def test_tensorrt_inference_batch_max(self, tensorrt_model):
        # Arrange
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        model = YOLO(tensorrt_model["engine"])
        # The same image repeated 8 times -- presumably the engine's max
        # batch size; TODO confirm against the export settings.
        sources = [str(imgs[0])] * 8
        # Act
        results = model.predict(source=sources, imgsz=tensorrt_model["imgsz"], verbose=False)
        # Assert
        assert len(results) == 8
@pytest.mark.skipif(sys.platform != "darwin", reason="CoreML requires macOS")
class TestCoremlExport:
    """Tests for the exported CoreML .mlpackage; macOS only.

    Globs are sorted so ``pkgs[0]`` is deterministic even if multiple
    packages exist -- bare ``glob`` order is filesystem-dependent.
    """

    def test_coreml_package_created(self, exported_models):
        # Assert
        pkgs = sorted(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(pkgs) >= 1

    def test_coreml_package_has_model(self, exported_models):
        # Assert: the package bundle contains the compiled CoreML model file.
        pkgs = sorted(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(pkgs) >= 1
        model_file = pkgs[0] / "Data" / "com.apple.CoreML" / "model.mlmodel"
        assert model_file.exists()

    def test_coreml_inference_produces_detections(self, exported_models):
        # Arrange
        pkgs = sorted(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(pkgs) >= 1
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        model = YOLO(str(pkgs[0]))
        # Act
        results = model.predict(source=str(imgs[0]), imgsz=exported_models["imgsz"], verbose=False)
        # Assert
        assert len(results) == 1
        assert results[0].boxes is not None