mirror of
https://github.com/azaion/ai-training.git
synced 2026-04-22 08:16:36 +00:00
aeb7f8ca8c
- Modified the existing-code workflow to automatically loop back to New Task after project completion without user confirmation. - Updated the autopilot state to reflect the current step as `done` and status as `completed`. - Clarified the deployment status report by specifying non-deployed services and their purposes. These changes enhance the automation of task management and improve documentation clarity.
208 lines
6.7 KiB
Python
208 lines
6.7 KiB
Python
import shutil
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
import cv2
|
|
import numpy as np
|
|
import onnxruntime as ort
|
|
import pytest
|
|
import torch
|
|
from ultralytics import YOLO
|
|
|
|
import constants as c
|
|
import exports as exports_mod
|
|
|
|
# TensorRT tests run only when an NVIDIA GPU is present *and* the tensorrt
# package is importable; if either is missing the whole class is skipped.
try:
    import tensorrt  # noqa: F401

    _HAS_TENSORRT = torch.cuda.is_available()
except ImportError:
    _HAS_TENSORRT = False
|
|
|
|
# Filesystem anchors for this test suite, all derived from this file's location.
_TESTS_DIR = Path(__file__).resolve().parent
# Shared test configuration lives one level above the tests directory.
_CONFIG_TEST = _TESTS_DIR.parent.joinpath("config.test.yaml")
# Sample images consumed by the inference tests.
_DATASET_IMAGES = _TESTS_DIR.joinpath("root", "data", "images")
|
|
|
|
|
|
class TestOnnxExport:
    """Validate the exported ONNX model.

    Covers: the .onnx artifact exists and is non-empty, the batch axis is
    dynamic, and CPU inference via onnxruntime works for batch sizes 1 and 4.
    Relies on the ``exported_models`` fixture (defined elsewhere) providing
    ``"onnx"`` (path) and ``"imgsz"`` (input side length) keys.
    """

    @staticmethod
    def _first_image_blob(imgsz):
        """Load the first dataset image as a normalized NCHW float blob of
        shape (1, 3, imgsz, imgsz); skip the calling test when no image exists.
        """
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        img = cv2.imread(str(imgs[0]))
        # cv2.imread returns None (no exception) on failure; fail loudly here
        # instead of letting blobFromImage raise an opaque error later.
        assert img is not None, f"unreadable test image: {imgs[0]}"
        return cv2.dnn.blobFromImage(
            img, 1.0 / 255.0, (imgsz, imgsz), (0, 0, 0), swapRB=True, crop=False,
        )

    def test_onnx_file_created(self, exported_models):
        # Assert: the export produced a non-empty .onnx file.
        p = Path(exported_models["onnx"])
        assert p.exists()
        assert p.stat().st_size > 0

    def test_onnx_batch_dimension_is_dynamic(self, exported_models):
        # Arrange
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        batch_dim = session.get_inputs()[0].shape[0]

        # Assert: onnxruntime reports a dynamic axis either as a symbolic
        # name (str) or as -1, depending on how the model was exported.
        assert isinstance(batch_dim, str) or batch_dim == -1

    def test_onnx_inference_batch_1(self, exported_models):
        # Arrange
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        meta = session.get_inputs()[0]
        blob = self._first_image_blob(exported_models["imgsz"])

        # Act
        out = session.run(None, {meta.name: blob})

        # Assert: one input image yields one output row along the batch axis.
        assert out[0].shape[0] == 1

    def test_onnx_inference_batch_multiple(self, exported_models):
        # Arrange: stack the same image 4x to exercise the dynamic batch axis.
        session = ort.InferenceSession(exported_models["onnx"], providers=["CPUExecutionProvider"])
        meta = session.get_inputs()[0]
        batch = np.concatenate([self._first_image_blob(exported_models["imgsz"])] * 4, axis=0)

        # Act
        out = session.run(None, {meta.name: batch})

        # Assert
        assert out[0].shape[0] == 4
|
|
|
|
|
|
@pytest.mark.skipif(not _HAS_TENSORRT, reason="TensorRT requires NVIDIA GPU and tensorrt package")
class TestTensorrtExport:
    """Validate TensorRT .engine export and inference at batch sizes 1, 4, 8.

    The class-scoped ``tensorrt_model`` fixture performs the export once and
    cleans up the produced .engine artifacts after all tests in the class ran.
    """

    @pytest.fixture(scope="class")
    def tensorrt_model(self, exported_models):
        # Arrange: point the global config at the test config/root so the
        # export helper writes next to the test model.
        model_dir = exported_models["model_dir"]
        pt_path = exported_models["pt_path"]
        old_config = c.config
        c.config = c.Config.from_yaml(str(_CONFIG_TEST), root=str(model_dir.parent))

        # Act. Restore the global config even when the export raises —
        # otherwise a failed export would leak the test config into every
        # test that runs afterwards.
        try:
            exports_mod.export_tensorrt(pt_path)
        finally:
            c.config = old_config

        engines = list(model_dir.glob("*.engine"))
        yield {
            # None signals "export produced no engine"; the tests assert on it.
            "engine": str(engines[0]) if engines else None,
            "model_dir": model_dir,
            "imgsz": exported_models["imgsz"],
        }

        # Teardown: remove engine artifacts produced by this class.
        for e in model_dir.glob("*.engine"):
            e.unlink(missing_ok=True)

    def test_tensorrt_engine_created(self, tensorrt_model):
        # Assert: the export produced a non-empty .engine file.
        assert tensorrt_model["engine"] is not None
        p = Path(tensorrt_model["engine"])
        assert p.exists()
        assert p.stat().st_size > 0

    def test_tensorrt_inference_batch_1(self, tensorrt_model):
        # Arrange
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        model = YOLO(tensorrt_model["engine"])

        # Act
        results = model.predict(source=str(imgs[0]), imgsz=tensorrt_model["imgsz"], verbose=False)

        # Assert: one source image yields one result with a boxes container.
        assert len(results) == 1
        assert results[0].boxes is not None

    def test_tensorrt_inference_batch_multiple(self, tensorrt_model):
        # Arrange: needs 4 distinct images to form a real multi-image batch.
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if len(imgs) < 4:
            pytest.skip("need at least 4 test images")
        model = YOLO(tensorrt_model["engine"])

        # Act
        results = model.predict(source=[str(p) for p in imgs[:4]], imgsz=tensorrt_model["imgsz"], verbose=False)

        # Assert
        assert len(results) == 4

    def test_tensorrt_inference_batch_max(self, tensorrt_model):
        # Arrange: repeat one image 8x to probe the engine's max batch size.
        assert tensorrt_model["engine"] is not None
        imgs = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not imgs:
            pytest.skip("no test images")
        model = YOLO(tensorrt_model["engine"])
        sources = [str(imgs[0])] * 8

        # Act
        results = model.predict(source=sources, imgsz=tensorrt_model["imgsz"], verbose=False)

        # Assert
        assert len(results) == 8
|
|
|
|
|
|
@pytest.mark.skipif(sys.platform != "darwin", reason="CoreML requires macOS")
class TestCoremlExport:
    """Checks on the exported CoreML package: presence, contents, inference."""

    def test_coreml_package_created(self, exported_models):
        # Assert: at least one .mlpackage was written into the model dir.
        packages = list(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(packages) >= 1

    def test_coreml_package_has_model(self, exported_models):
        # Assert: the package contains the compiled model payload.
        packages = list(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(packages) >= 1
        compiled = packages[0] / "Data" / "com.apple.CoreML" / "model.mlmodel"
        assert compiled.exists()

    def test_coreml_inference_produces_detections(self, exported_models):
        # Arrange
        packages = list(exported_models["model_dir"].glob("*.mlpackage"))
        assert len(packages) >= 1
        samples = sorted(_DATASET_IMAGES.glob("*.jpg"))
        if not samples:
            pytest.skip("no test images")
        model = YOLO(str(packages[0]))

        # Act
        results = model.predict(source=str(samples[0]), imgsz=exported_models["imgsz"], verbose=False)

        # Assert: one source image yields one result with a boxes container.
        assert len(results) == 1
        assert results[0].boxes is not None
|
|
|
|
|
|
# Location of the real-model input data used by TestCoremlExportRealModel.
_INPUT_DATA = _TESTS_DIR.parent.joinpath("_docs", "00_problem", "input_data")
|
|
|
|
|
|
@pytest.mark.skipif(sys.platform != "darwin", reason="CoreML requires macOS")
class TestCoremlExportRealModel:
    """Export the real project .pt checkpoint to CoreML and verify the package.

    Uses the ``fixture_pt_model`` fixture (defined elsewhere) for the
    checkpoint path; the resulting package is moved into ``_INPUT_DATA``.
    """

    def test_export_azaion_pt_to_coreml(self, fixture_pt_model):
        # Arrange: start from a clean slate so a stale package cannot pass.
        output_dir = _INPUT_DATA / "azaion.mlpackage"
        if output_dir.exists():
            shutil.rmtree(output_dir)

        # Act
        model = YOLO(fixture_pt_model)
        # model.export() returns the path of the artifact it wrote; use it
        # instead of guessing the location via with_suffix(".mlpackage").
        exported = Path(model.export(format="coreml", imgsz=1280))
        if exported != output_dir:
            shutil.move(str(exported), str(output_dir))

        # Assert: the package exists and contains the compiled model payload.
        assert output_dir.exists()
        model_file = output_dir / "Data" / "com.apple.CoreML" / "model.mlmodel"
        assert model_file.exists()
|