mirror of
https://github.com/azaion/gps-denied-onboard.git
synced 2026-04-23 02:26:37 +00:00
094895b21b
Phase 2 — Visual Odometry: - ORBVisualOdometry (dev/CI), CuVSLAMVisualOdometry (Jetson) - TRTInferenceEngine (TensorRT FP16, conditional import) - create_vo_backend() factory Phase 3 — Satellite Matching + GPR: - SatelliteDataManager: local z/x/y tiles, ESKF ±3σ tile selection - GSD normalization (SAT-03), RANSAC inlier-ratio confidence (SAT-04) - GlobalPlaceRecognition: Faiss index + numpy fallback Phase 4 — MAVLink I/O: - MAVLinkBridge: GPS_INPUT 15+ fields, IMU callback, 1Hz telemetry - 3-consecutive-failure reloc request - MockMAVConnection for CI Phase 5 — Pipeline Wiring: - ESKF wired into process_frame: VO update → satellite update - CoordinateTransformer + SatelliteDataManager via DI - MAVLink state push per frame (PIPE-07) - Real pixel_to_gps via ray-ground projection (PIPE-06) - GTSAM ISAM2 update when available (PIPE-03) Phase 6 — Docker + CI: - Multi-stage Dockerfile (python:3.11-slim) - docker-compose.yml (dev), docker-compose.sitl.yml (ArduPilot SITL) - GitHub Actions: ci.yml (lint+pytest+docker smoke), sitl.yml (nightly) - tests/test_sitl_integration.py (8 tests, skip without SITL) Phase 7 — Accuracy Validation: - AccuracyBenchmark + SyntheticTrajectory - AC-PERF-1: 80% within 50m ✅ - AC-PERF-2: 60% within 20m ✅ - AC-PERF-3: p95 latency < 400ms ✅ - AC-PERF-4: VO drift 1km < 100m ✅ (actual ~11m) - scripts/benchmark_accuracy.py CLI Tests: 195 passed / 8 skipped Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
118 lines
4.1 KiB
Python
118 lines
4.1 KiB
Python
"""Tests for Image Input Pipeline (F05)."""
|
|
|
|
import asyncio
|
|
|
|
import cv2
|
|
import numpy as np
|
|
import pytest
|
|
|
|
from gps_denied.core.pipeline import ImageInputPipeline, QueueFullError, ValidationError
|
|
from gps_denied.schemas.image import ImageBatch
|
|
|
|
|
|
@pytest.fixture
def pipeline(tmp_path):
    """Provide an ImageInputPipeline backed by a per-test temp dir.

    Queue capacity is deliberately capped at 2 so the overflow test
    can trigger QueueFullError cheaply.
    """
    image_dir = tmp_path / "images"
    return ImageInputPipeline(storage_dir=str(image_dir), max_queue_size=2)
|
|
|
|
|
|
def test_batch_validation(pipeline):
    """VO-05: minimum batch size is 1 (was 10); only an empty batch is rejected."""

    def make_batch(payloads, names, start, end):
        # Small local builder so each case reads as data, not boilerplate.
        return ImageBatch(
            images=payloads,
            filenames=names,
            start_sequence=start,
            end_sequence=end,
            batch_number=1,
        )

    # Zero images is still invalid.
    empty = make_batch([], [], 1, 0)
    outcome = pipeline.validate_batch(empty)
    assert not outcome.valid
    assert "Batch is empty" in outcome.errors

    # Single image is now valid under the new rule.
    single = make_batch([b"fake"], ["AD000001.jpg"], 1, 1)
    outcome = pipeline.validate_batch(single)
    assert outcome.valid, f"Single-image batch should be valid; errors: {outcome.errors}"

    # Two images: also valid.
    pair = make_batch([b"1", b"2"], ["AD000001.jpg", "AD000002.jpg"], 1, 2)
    assert pipeline.validate_batch(pair).valid

    # Larger batches continue to pass.
    big = make_batch(
        [b"fake"] * 10,
        [f"AD{i:06d}.jpg" for i in range(1, 11)],
        1,
        10,
    )
    assert pipeline.validate_batch(big).valid
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_queue_and_process(pipeline):
    """Queue a 10-image batch, process it, then read images back in sequence order."""
    flight = "test_f1"

    # Encode a tiny black frame so the pipeline receives real JPEG bytes.
    blank = np.zeros((10, 10, 3), dtype=np.uint8)
    _ok, jpeg = cv2.imencode(".jpg", blank)
    payload = jpeg.tobytes()

    batch = ImageBatch(
        images=[payload] * 10,
        filenames=[f"AD{i:06d}.jpg" for i in range(1, 11)],
        start_sequence=1,
        end_sequence=10,
        batch_number=1,
    )
    pipeline.queue_batch(flight, batch)

    # Processing should yield all 10 images with sequences intact.
    result = await pipeline.process_next_batch(flight)
    assert result is not None
    assert len(result.images) == 10
    assert result.images[0].sequence == 1
    assert result.images[-1].sequence == 10

    # Status reflects the fully-processed batch.
    status = pipeline.get_processing_status(flight)
    assert status.total_images == 10
    assert status.processed_images == 10

    # Sequential reads hand back images strictly in order.
    first = pipeline.get_next_image(flight)
    assert first is not None
    assert first.sequence == 1

    second = pipeline.get_next_image(flight)
    assert second is not None
    assert second.sequence == 2
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_exact_sequence_lookup_no_collision(pipeline, tmp_path):
    """VO-05: sequence 1 must NOT match AD000011.jpg or AD000010.jpg."""
    flight = "exact_test"

    # Real JPEG payload so processing does not reject the images.
    blank = np.zeros((10, 10, 3), dtype=np.uint8)
    _ok, jpeg = cv2.imencode(".jpg", blank)
    payload = jpeg.tobytes()

    # Store sequences 1 and 11 under the same flight — the old prefix-style
    # lookup would have confused them.
    batch = ImageBatch(
        images=[payload, payload],
        filenames=["AD000001.jpg", "AD000011.jpg"],
        start_sequence=1,
        end_sequence=11,
        batch_number=1,
    )
    pipeline.queue_batch(flight, batch)
    await pipeline.process_next_batch(flight)

    low = pipeline.get_image_by_sequence(flight, 1)
    high = pipeline.get_image_by_sequence(flight, 11)

    assert low is not None
    assert low.filename == "AD000001.jpg", f"Expected AD000001.jpg, got {low.filename}"
    assert high is not None
    assert high.filename == "AD000011.jpg", f"Expected AD000011.jpg, got {high.filename}"
|
|
|
|
|
|
def test_queue_full(pipeline):
    """A third enqueue must raise once the size-2 queue is saturated."""
    flight = "test_full"
    batch = ImageBatch(
        images=[b"fake"] * 10,
        filenames=[f"AD{i:06d}.jpg" for i in range(1, 11)],
        start_sequence=1,
        end_sequence=10,
        batch_number=1,
    )

    # Fixture queue capacity is 2: the first two enqueues succeed...
    pipeline.queue_batch(flight, batch)
    pipeline.queue_batch(flight, batch)

    # ...and the third overflows.
    with pytest.raises(QueueFullError):
        pipeline.queue_batch(flight, batch)
|