Files
gps-denied-onboard/tests/test_vo.py
T
Yuzviak 62dc3781b6 refactor(vo): address code review for CuVSLAMMonoDepthVisualOdometry
- test_depth_hint_scales_translation: replace assert True with mock-based verification of scale factor
- _init_tracker / _compute_via_cuvslam: logger.exception for stack traces
- _init_tracker: loud warning when Jetson path disabled
- drop personal attribution (git blame suffices)
- translate Ukrainian test docstrings to English
2026-04-18 16:17:09 +03:00

304 lines
11 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""Tests for Sequential Visual Odometry (F07)."""
import numpy as np
import pytest
from gps_denied.core.models import ModelManager
from gps_denied.core.vo import (
CuVSLAMVisualOdometry,
ISequentialVisualOdometry,
ORBVisualOdometry,
SequentialVisualOdometry,
create_vo_backend,
)
from gps_denied.schemas import CameraParameters
from gps_denied.schemas.vo import Features, Matches
@pytest.fixture
def vo():
    """Provide a SequentialVisualOdometry wired to a fresh ModelManager."""
    return SequentialVisualOdometry(ModelManager())
@pytest.fixture
def cam_params():
    """Standard 640x480 camera: 5 mm lens, centered principal point."""
    intrinsics = dict(
        focal_length=5.0,
        sensor_width=6.4,
        sensor_height=4.8,
        resolution_width=640,
        resolution_height=480,
        principal_point=(320.0, 240.0),
    )
    return CameraParameters(**intrinsics)
def test_extract_features(vo):
    """extract_features on a frame returns 500 keypoints with 256-d descriptors."""
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    feats = vo.extract_features(frame)
    assert isinstance(feats, Features)
    assert feats.keypoints.shape == (500, 2)
    assert feats.descriptors.shape == (500, 256)
def test_match_features(vo):
    """match_features pairs two 100-feature sets into a Matches object."""

    def _random_features(count=100, dim=256):
        # Synthetic feature set; content is irrelevant for the shape contract.
        return Features(
            keypoints=np.random.rand(count, 2),
            descriptors=np.random.rand(count, dim),
            scores=np.random.rand(count),
        )

    result = vo.match_features(_random_features(), _random_features())
    assert isinstance(result, Matches)
    assert result.matches.shape == (100, 2)
def test_estimate_motion_insufficient_matches(vo, cam_params):
    """Fewer than 8 correspondences cannot constrain motion — expect None."""
    too_few = Matches(
        matches=np.zeros((5, 2)),
        scores=np.zeros(5),
        keypoints1=np.zeros((5, 2)),
        keypoints2=np.zeros((5, 2)),
    )
    assert vo.estimate_motion(too_few, cam_params) is None
def test_estimate_motion_synthetic(vo, cam_params):
    """Points satisfying the epipolar constraint yield a well-formed motion.

    A pure 10-pixel rightward shift of every point is the simplest motion
    that compute_relative_pose can recover reliably.
    """
    count = 100
    src = np.random.rand(count, 2) * 400 + 100
    dst = src + np.array([10.0, 0.0])  # every point moves 10 px to the right
    idx = np.arange(count)
    matches = Matches(
        matches=np.column_stack([idx, idx]),
        scores=np.ones(count),
        keypoints1=src,
        keypoints2=dst,
    )
    motion = vo.estimate_motion(matches, cam_params)
    assert motion is not None
    assert motion.inlier_count > 20
    assert motion.translation.shape == (3,)
    assert motion.rotation.shape == (3, 3)
def test_compute_relative_pose(vo, cam_params):
    """Black frames: pose may be None; if produced, it must be well-formed.

    The mock manager emits random features, so findEssentialMat will likely
    find no inliers or fail; compute_relative_pose must degrade gracefully.
    """
    blank_a = np.zeros((480, 640, 3), dtype=np.uint8)
    blank_b = np.zeros_like(blank_a)
    pose = vo.compute_relative_pose(blank_a, blank_b, cam_params)
    if pose is None:
        return
    assert pose.translation.shape == (3,)
    assert pose.rotation.shape == (3, 3)
    # Random mock keypoints leave essentially no inliers → tracking is bad.
    assert pose.tracking_good is False
# ---------------------------------------------------------------
# VO-02: ORBVisualOdometry interface contract
# ---------------------------------------------------------------
@pytest.fixture
def orb_vo():
    """Fresh ORB-based VO backend for each test."""
    return ORBVisualOdometry()
def test_orb_implements_interface(orb_vo):
    """ORBVisualOdometry must satisfy ISequentialVisualOdometry."""
    backend = orb_vo
    assert isinstance(backend, ISequentialVisualOdometry)
def test_orb_extract_features(orb_vo):
    """A black frame yields a well-shaped (possibly empty) Features set."""
    feats = orb_vo.extract_features(np.zeros((480, 640, 3), dtype=np.uint8))
    assert isinstance(feats, Features)
    # A uniform image has no corners — zero keypoints is a valid outcome,
    # but the array must still be (N, 2).
    assert feats.keypoints.ndim == 2
    assert feats.keypoints.shape[1] == 2
def test_orb_match_features(orb_vo):
    """match_features returns Matches even when features are empty."""
    empty = Features(
        keypoints=np.zeros((0, 2), dtype=np.float32),
        descriptors=np.zeros((0, 32), dtype=np.float32),
        scores=np.zeros(0, dtype=np.float32),
    )
    m = orb_vo.match_features(empty, empty)
    assert isinstance(m, Matches)
    # The original `assert x == 2 if cond else True` evaluated to a bare
    # `assert True` for the empty case — state the shape check explicitly,
    # guarded so it only applies when any matches exist.
    if len(m.matches) > 0:
        assert m.matches.shape[1] == 2
def test_orb_compute_relative_pose_synthetic(orb_vo, cam_params):
    """ORB can track a small synthetic shift between frames."""
    frame = np.random.randint(50, 200, (480, 640, 3), dtype=np.uint8)
    moved = np.roll(frame, 10, axis=1)  # 10 px rightward wrap-around shift
    pose = orb_vo.compute_relative_pose(frame, moved, cam_params)
    # Tracking may fail on texture-poor content; any returned pose must be sane.
    if pose is not None:
        assert pose.translation.shape == (3,)
        assert pose.rotation.shape == (3, 3)
        # ORB is monocular, therefore it can never recover metric scale.
        assert pose.scale_ambiguous is True
def test_orb_scale_ambiguous(orb_vo, cam_params):
    """ORB RelativePose always has scale_ambiguous=True (monocular)."""
    first, second = (
        np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        for _ in range(2)
    )
    pose = orb_vo.compute_relative_pose(first, second, cam_params)
    if pose is not None:
        assert pose.scale_ambiguous is True
# ---------------------------------------------------------------
# VO-01: CuVSLAMVisualOdometry (dev/CI fallback path)
# ---------------------------------------------------------------
def test_cuvslam_implements_interface():
    """CuVSLAMVisualOdometry satisfies ISequentialVisualOdometry on dev/CI."""
    assert isinstance(CuVSLAMVisualOdometry(), ISequentialVisualOdometry)
def test_cuvslam_scale_not_ambiguous_on_dev(cam_params):
    """On dev/CI (no cuVSLAM), CuVSLAMVO still marks scale_ambiguous=False (metric intent)."""
    backend = CuVSLAMVisualOdometry()
    shape = (480, 640, 3)
    first = np.random.randint(0, 255, shape, dtype=np.uint8)
    second = np.random.randint(0, 255, shape, dtype=np.uint8)
    pose = backend.compute_relative_pose(first, second, cam_params)
    if pose is not None:
        # VO-04: the backend claims metric scale even on the fallback path.
        assert pose.scale_ambiguous is False
# ---------------------------------------------------------------
# VO-03: ModelManager auto-selects Mock on dev/CI
# ---------------------------------------------------------------
def test_model_manager_mock_on_dev():
    """On non-Jetson hosts get_inference_engine returns MockInferenceEngine."""
    from gps_denied.core.models import MockInferenceEngine

    engine = ModelManager().get_inference_engine("SuperPoint")
    # Dev/CI machines have no TensorRT → always the mock.
    assert isinstance(engine, MockInferenceEngine)
def test_model_manager_trt_engine_loader():
    """TRTInferenceEngine falls back to Mock when the engine file is absent."""
    from gps_denied.core.models import TRTInferenceEngine

    engine = TRTInferenceEngine("SuperPoint", "/nonexistent/superpoint.engine")
    # Construction must survive the missing file by installing a mock fallback.
    assert engine._mock_fallback is not None
    # Inference routed through the fallback must still produce keypoints.
    output = engine.infer(np.zeros((480, 640, 3), dtype=np.uint8))
    assert "keypoints" in output
# ---------------------------------------------------------------
# Factory: create_vo_backend
# ---------------------------------------------------------------
def test_create_vo_backend_returns_interface():
    """create_vo_backend() always returns an ISequentialVisualOdometry."""
    backend = create_vo_backend(model_manager=ModelManager())
    assert isinstance(backend, ISequentialVisualOdometry)
def test_create_vo_backend_orb_fallback():
    """Without model_manager and no cuVSLAM, falls back to ORBVisualOdometry."""
    assert isinstance(create_vo_backend(model_manager=None), ORBVisualOdometry)
# ---------------------------------------------------------------------------
# CuVSLAMMonoDepthVisualOdometry tests
# ---------------------------------------------------------------------------
def test_mono_depth_implements_interface():
    """CuVSLAMMonoDepthVisualOdometry implements ISequentialVisualOdometry."""
    from gps_denied.core.vo import CuVSLAMMonoDepthVisualOdometry

    backend = CuVSLAMMonoDepthVisualOdometry(depth_hint_m=600.0)
    assert isinstance(backend, ISequentialVisualOdometry)
def test_mono_depth_scale_not_ambiguous():
    """Mono-Depth backend always returns scale_ambiguous=False."""
    from gps_denied.core.vo import CuVSLAMMonoDepthVisualOdometry

    backend = CuVSLAMMonoDepthVisualOdometry(depth_hint_m=600.0)
    blank = np.zeros((480, 640), dtype=np.uint8)
    cam = CameraParameters(
        focal_length=16.0, sensor_width=23.2, sensor_height=17.4,
        resolution_width=640, resolution_height=480,
    )
    pose = backend.compute_relative_pose(blank, blank, cam)
    # Blank frames may defeat tracking entirely — but any pose is metric.
    if pose is not None:
        assert pose.scale_ambiguous is False
def test_mono_depth_depth_hint_scales_translation():
    """depth_hint_m scales the dev/CI ORB-fallback translation by depth_hint_m / 600.0.

    Note: the docstring was still in Ukrainian despite the commit's claim of
    translating all test docstrings — now in English.
    """
    from unittest.mock import patch
    from gps_denied.core.vo import CuVSLAMMonoDepthVisualOdometry, ORBVisualOdometry
    from gps_denied.schemas.vo import RelativePose

    cam = CameraParameters(
        focal_length=16.0, sensor_width=23.2, sensor_height=17.4,
        resolution_width=640, resolution_height=480,
    )
    img = np.zeros((480, 640), dtype=np.uint8)
    # Mock ORB to always return a unit translation — lets us verify the
    # scale factor cleanly, independent of real feature tracking.
    unit_pose = RelativePose(
        translation=np.array([1.0, 0.0, 0.0]),
        rotation=np.eye(3),
        confidence=1.0,
        inlier_count=100,
        total_matches=100,
        tracking_good=True,
        scale_ambiguous=True,
    )
    with patch.object(ORBVisualOdometry, "compute_relative_pose", return_value=unit_pose):
        poses = {
            hint: CuVSLAMMonoDepthVisualOdometry(
                depth_hint_m=hint
            ).compute_relative_pose(img, img, cam)
            for hint in (300.0, 600.0, 1200.0)
        }
    # 600 m is the reference altitude: scale = depth_hint_m / 600.0
    # → 300 m halves, 600 m is identity, 1200 m doubles the translation.
    for hint, pose in poses.items():
        assert np.allclose(pose.translation, [hint / 600.0, 0.0, 0.0])
        # Every mono-depth pose must report metric (non-ambiguous) scale.
        assert pose.scale_ambiguous is False
def test_mono_depth_create_vo_backend_selects_it():
    """create_vo_backend with prefer_mono_depth=True returns CuVSLAMMonoDepthVisualOdometry."""
    from gps_denied.core.vo import CuVSLAMMonoDepthVisualOdometry, create_vo_backend

    selected = create_vo_backend(prefer_mono_depth=True, depth_hint_m=600.0)
    assert isinstance(selected, CuVSLAMMonoDepthVisualOdometry)