Update test results directory structure and enhance Docker configurations

- Modified `.gitignore` to reflect the new path for test results.
- Updated `docker-compose.test.yml` to mount the correct test results directory.
- Adjusted `Dockerfile.test` to set the `PYTHONPATH` and ensure test results are saved in the updated location.
- Added `boto3` and `netron` to `requirements-test.txt` to support the new functionality.
- Updated `pytest.ini` to include the new `pythonpath` for test discovery.

These changes streamline the testing process and ensure compatibility with the updated directory structure.
This commit is contained in:
Oleksandr Bezdieniezhnykh
2026-03-28 00:13:08 +02:00
parent c20018745b
commit 243b69656b
48 changed files with 707 additions and 581 deletions
+19 -36
View File
@@ -1,6 +1,4 @@
import shutil
import sys
import types
from os import path as osp
from pathlib import Path
@@ -9,40 +7,6 @@ import pytest
import constants as c_mod
def _stub_train_dependencies():
if getattr(_stub_train_dependencies, "_done", False):
return
def add_mod(name):
if name in sys.modules:
return sys.modules[name]
m = types.ModuleType(name)
sys.modules[name] = m
return m
ultra = add_mod("ultralytics")
class YOLO:
pass
ultra.YOLO = YOLO
def fake_client(*_a, **_k):
return types.SimpleNamespace(
upload_fileobj=lambda *_a, **_k: None,
download_file=lambda *_a, **_k: None,
)
boto = add_mod("boto3")
boto.client = fake_client
add_mod("netron")
add_mod("requests")
_stub_train_dependencies._done = True
_stub_train_dependencies()
def _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -84,6 +48,7 @@ def test_bt_dsf_01_split_ratio_70_20_10(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
train, today_ds = _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -93,7 +58,9 @@ def test_bt_dsf_01_split_ratio_70_20_10(
100,
set(),
)
# Act
train.form_dataset()
# Assert
assert _count_jpg(Path(today_ds, "train", "images")) == 70
assert _count_jpg(Path(today_ds, "valid", "images")) == 20
assert _count_jpg(Path(today_ds, "test", "images")) == 10
@@ -106,6 +73,7 @@ def test_bt_dsf_02_six_subdirectories(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
train, today_ds = _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -115,7 +83,9 @@ def test_bt_dsf_02_six_subdirectories(
100,
set(),
)
# Act
train.form_dataset()
# Assert
base = Path(today_ds)
assert (base / "train" / "images").is_dir()
assert (base / "train" / "labels").is_dir()
@@ -132,6 +102,7 @@ def test_bt_dsf_03_total_files_one_hundred(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
train, today_ds = _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -141,7 +112,9 @@ def test_bt_dsf_03_total_files_one_hundred(
100,
set(),
)
# Act
train.form_dataset()
# Assert
n = (
_count_jpg(Path(today_ds, "train", "images"))
+ _count_jpg(Path(today_ds, "valid", "images"))
@@ -157,6 +130,7 @@ def test_bt_dsf_04_corrupted_labels_quarantined(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
stems = [p.stem for p in sorted(fixture_images_dir.glob("*.jpg"))[:100]]
corrupt = set(stems[:5])
train, today_ds = _prepare_form_dataset(
@@ -168,7 +142,9 @@ def test_bt_dsf_04_corrupted_labels_quarantined(
100,
corrupt,
)
# Act
train.form_dataset()
# Assert
split_total = (
_count_jpg(Path(today_ds, "train", "images"))
+ _count_jpg(Path(today_ds, "valid", "images"))
@@ -187,6 +163,7 @@ def test_rt_dsf_01_empty_processed_no_crash(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
train, today_ds = _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -196,12 +173,15 @@ def test_rt_dsf_01_empty_processed_no_crash(
0,
set(),
)
# Act
train.form_dataset()
# Assert
assert Path(today_ds).is_dir()
@pytest.mark.resource_limit
def test_rl_dsf_01_split_ratios_sum_hundred():
    """The module-level train/valid/test split percentages must total 100."""
    import train

    # Assert
    total = train.train_set + train.valid_set + train.test_set
    assert total == 100
@@ -215,6 +195,7 @@ def test_rl_dsf_02_no_filename_duplication_across_splits(
fixture_images_dir,
fixture_labels_dir,
):
# Arrange
train, today_ds = _prepare_form_dataset(
monkeypatch,
tmp_path,
@@ -224,7 +205,9 @@ def test_rl_dsf_02_no_filename_duplication_across_splits(
100,
set(),
)
# Act
train.form_dataset()
# Assert
base = Path(today_ds)
names = []
for split in ("train", "valid", "test"):