# Mirror of https://github.com/azaion/gps-denied-onboard.git
# Synced 2026-04-22 08:36:36 +00:00
import pytest
import numpy as np
from unittest.mock import Mock, patch

from f07_sequential_visual_odometry import SequentialVisualOdometry, Features, Matches, Motion
from f02_1_flight_lifecycle_manager import CameraParameters
@pytest.fixture
def svo():
    """Provide a fresh SequentialVisualOdometry instance (no model manager)."""
    return SequentialVisualOdometry()
@pytest.fixture
def mock_model_manager():
    """Build a Mock model manager with canned SuperPoint/LightGlue outputs.

    ``run_superpoint`` returns 500 random keypoints spread over a 1920x1080
    frame with 256-dim float32 descriptors and per-keypoint scores.
    ``run_lightglue`` returns 300 random index pairs with float32 confidences.
    """
    manager = Mock()

    # Canned SuperPoint output: 500 keypoints in pixel coordinates.
    keypoints = np.random.rand(500, 2) * [1920, 1080]
    descriptors = np.random.rand(500, 256).astype(np.float32)
    keypoint_scores = np.random.rand(500).astype(np.float32)
    manager.run_superpoint.return_value = (keypoints, descriptors, keypoint_scores)

    # Canned LightGlue output: 300 match pairs, indices unique per side.
    n_matches = 300
    idx_left = np.random.choice(500, n_matches, replace=False)
    idx_right = np.random.choice(500, n_matches, replace=False)
    pair_indices = np.stack([idx_left, idx_right], axis=-1)
    pair_scores = np.random.rand(n_matches).astype(np.float32)
    manager.run_lightglue.return_value = (pair_indices, pair_scores)

    return manager
@pytest.fixture
def camera_params():
    """Camera intrinsics: 25mm lens, 36mm sensor width, 1920x1080 resolution."""
    return CameraParameters(
        focal_length_mm=25.0,
        sensor_width_mm=36.0,
        resolution={"width": 1920, "height": 1080},
    )
class TestSequentialVisualOdometry:
    """Unit tests for SequentialVisualOdometry (features 07.01-07.03).

    NOTE(review): the original file contained an exact duplicate of the
    entire "Feature 07.03" section (six test methods defined twice).
    Python keeps only the last definition of each name, so the first copies
    never ran; the duplicates are removed here.
    """

    # --- Feature 07.01: Feature Extraction ---

    def test_preprocess_image_rgb(self, svo):
        """RGB uint8 input becomes normalized float32 grayscale of same HxW."""
        rgb_img = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
        preprocessed = svo._preprocess_image(rgb_img)

        assert len(preprocessed.shape) == 2  # Grayscale
        assert preprocessed.shape == (1080, 1920)
        assert preprocessed.dtype == np.float32
        assert np.max(preprocessed) <= 1.0  # Normalized

    def test_preprocess_image_grayscale_passthrough(self, svo):
        """Already-grayscale input keeps its shape and becomes float32."""
        gray_img = np.random.randint(0, 255, (1080, 1920), dtype=np.uint8)
        preprocessed = svo._preprocess_image(gray_img)

        assert len(preprocessed.shape) == 2
        assert preprocessed.shape == (1080, 1920)
        assert preprocessed.dtype == np.float32

    def test_extract_features_empty_image(self, svo):
        """An empty image yields empty keypoint/descriptor/score arrays."""
        empty_img = np.array([])
        features = svo.extract_features(empty_img)

        assert features.keypoints.shape == (0, 2)
        assert features.descriptors.shape == (0, 256)
        assert len(features.scores) == 0

    def test_extract_features_descriptor_dimensions(self, svo):
        """Descriptor and score array lengths agree with the keypoint count."""
        img = np.zeros((480, 640, 3), dtype=np.uint8)
        features = svo.extract_features(img)

        # Using fallback mock, should return up to 2000 points
        n_points = len(features.keypoints)
        assert features.descriptors.shape == (n_points, 256)
        assert features.scores.shape == (n_points,)

    def test_extract_features_feature_count_range(self, svo):
        """Feature count stays within (0, 2000] on a dense random image."""
        img = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)
        features = svo.extract_features(img)

        # Should cap at 2000 points via NMS fallback
        assert len(features.keypoints) <= 2000
        assert len(features.keypoints) > 0

    def test_model_manager_integration(self, mock_model_manager):
        """With a model manager, extraction delegates to run_superpoint."""
        svo_with_model = SequentialVisualOdometry(model_manager=mock_model_manager)
        img = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8)

        features = svo_with_model.extract_features(img)

        # Should use the mocked returns
        mock_model_manager.run_superpoint.assert_called_once()
        assert len(features.keypoints) == 500
        assert features.descriptors.shape == (500, 256)
        assert len(features.scores) == 500

    # --- Feature 07.02: Feature Matching ---

    def test_match_features_high_overlap(self, svo):
        """Matching a feature set against itself yields consistent matches."""
        # Simulate high overlap by matching features with themselves
        img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        features = svo.extract_features(img)

        matches = svo.match_features(features, features)

        assert len(matches.matches) > 0
        assert len(matches.matches) == len(matches.scores)
        assert matches.keypoints1.shape == matches.keypoints2.shape

    def test_match_features_no_overlap(self, svo):
        """Matching two unrelated random images still returns a result."""
        # Simulate no overlap with two different random images
        img1 = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        img2 = np.random.randint(1, 254, (480, 640, 3), dtype=np.uint8)
        features1 = svo.extract_features(img1)
        features2 = svo.extract_features(img2)

        matches = svo.match_features(features1, features2)

        # Mock should still produce some matches, but a real system would have very few
        assert matches is not None

    def test_match_features_empty_input(self, svo):
        """Empty feature sets on either side produce zero matches."""
        empty_features = Features(keypoints=np.empty((0, 2)), descriptors=np.empty((0, 256)), scores=np.empty((0,)))
        img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        features = svo.extract_features(img)

        matches = svo.match_features(empty_features, features)
        assert len(matches.matches) == 0

        matches2 = svo.match_features(features, empty_features)
        assert len(matches2.matches) == 0

    def test_match_features_confidence_filtering(self, svo, mock_model_manager):
        """Low-confidence matches are dropped by the confidence filter."""
        svo_with_model = SequentialVisualOdometry(model_manager=mock_model_manager)
        img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
        features = svo_with_model.extract_features(img)

        # Mock LightGlue to return scores both above and below a threshold
        scores = np.array([0.9, 0.95, 0.2, 0.8, 0.1])
        matches = np.random.randint(0, 500, (5, 2))
        mock_model_manager.run_lightglue.return_value = (matches, scores)

        # Set a high confidence threshold in the SVO logic to test filtering
        with patch.object(svo_with_model, '_filter_matches_by_confidence', wraps=lambda m, s, t: (m[s>0.7], s[s>0.7])):
            result_matches = svo_with_model.match_features(features, features)
            assert len(result_matches.matches) == 3  # Only 3 scores are > 0.7

    # --- Feature 07.03: Relative Pose Computation ---

    def test_normalize_keypoints(self, svo, camera_params):
        """Pixel keypoints normalize so the principal point maps to (0, 0)."""
        kpts = np.array([[960.0, 540.0], [0.0, 0.0]])
        norm = svo._normalize_keypoints(kpts, camera_params)
        # Center should be roughly 0, 0
        assert np.allclose(norm[0], [0.0, 0.0])
        # Top left should be negative
        assert norm[1, 0] < 0
        assert norm[1, 1] < 0

    def test_estimate_essential_matrix_insufficient(self, svo, camera_params):
        """Fewer correspondences than the minimum yields no essential matrix."""
        K = svo._get_camera_matrix(camera_params)
        pts1 = np.random.rand(5, 2)
        pts2 = np.random.rand(5, 2)
        E, mask = svo._estimate_essential_matrix(pts1, pts2, K)
        assert E is None

    def test_estimate_essential_matrix_good(self, svo, camera_params):
        """A hundred noisy correspondences yield a valid 3x3 essential matrix."""
        K = svo._get_camera_matrix(camera_params)
        pts1 = np.random.rand(100, 2) * 1000
        pts2 = pts1 + np.random.normal(0, 1, (100, 2))
        E, mask = svo._estimate_essential_matrix(pts1, pts2, K)
        assert E is not None
        assert E.shape == (3, 3)

    def test_decompose_essential_matrix(self, svo, camera_params):
        """Decomposition yields a proper rotation and a unit translation."""
        K = svo._get_camera_matrix(camera_params)
        pts1 = np.random.rand(100, 2) * 1000
        pts2 = pts1 + np.array([10.0, 0.0])
        E, mask = svo._estimate_essential_matrix(pts1, pts2, K)

        R, t = svo._decompose_essential_matrix(E, pts1, pts2, K)
        assert R is not None
        assert t is not None
        assert np.isclose(np.linalg.det(R), 1.0, atol=1e-3)
        assert np.isclose(np.linalg.norm(t), 1.0, atol=1e-3)

    def test_compute_tracking_quality(self, svo):
        """Confidence/flag reflect good, degraded, and lost tracking regimes."""
        # Good
        conf, good = svo._compute_tracking_quality(100, 150)
        assert good is True
        assert conf > 0.5

        # Degraded
        conf, good = svo._compute_tracking_quality(30, 50)
        assert good is True
        assert conf < 0.5

        # Lost
        conf, good = svo._compute_tracking_quality(10, 20)
        assert good is False
        assert conf == 0.0

    @patch.object(SequentialVisualOdometry, 'extract_features')
    @patch.object(SequentialVisualOdometry, 'match_features')
    def test_compute_relative_pose_pipeline(self, mock_match, mock_extract, svo, camera_params):
        """End-to-end pose pipeline extracts twice, matches once, returns a pose."""
        img1 = np.zeros((100, 100))
        img2 = np.zeros((100, 100))

        mock_extract.return_value = Features(keypoints=np.empty((0,2)), descriptors=np.empty((0,256)), scores=np.empty(0))
        mock_match.return_value = Matches(
            matches=np.zeros((100, 2)), scores=np.ones(100),
            keypoints1=np.random.rand(100, 2) * 1000, keypoints2=np.random.rand(100, 2) * 1000
        )

        pose = svo.compute_relative_pose(img1, img2, camera_params)

        assert mock_extract.call_count == 2
        assert mock_match.call_count == 1
        assert pose is not None
        assert pose.tracking_good is not None