# Mirror of https://github.com/azaion/gps-denied-desktop.git
# Synced 2026-04-22 07:06:37 +00:00
import matplotlib.pyplot as plt
|
|
from lightglue import viz2d
|
|
import numpy as np
|
|
import cv2
|
|
import time
|
|
|
|
# --- IMPORTS ---
|
|
# Assumes your matcher class is in 'lightglue_onnx.py'
|
|
from lightglue_onnx import LightGlueMatcher
|
|
from geolocator import GeoTracker
|
|
|
|
# --- CONFIGURATION ---

# Input image pair and the fused SuperPoint+LightGlue ONNX model.
IMAGE_0_PATH = "gps-denied-ai-sandbox/docs/00_problem/input_data/AD000001.jpg"
IMAGE_1_PATH = "gps-denied-ai-sandbox/docs/00_problem/input_data/AD000002.jpg"
MODEL_PATH = "superpoint_lightglue_end2end_fused_gpu.onnx"

# Frame-0 telemetry: aircraft pose when the first image was captured.
ALTITUDE = 100.0       # meters
HEADING = 0.0          # degrees (0 = North)
START_LAT = 48.275292
START_LON = 37.385220

# Camera intrinsics used to turn pixel shifts into ground distances.
FOCAL_MM = 25.0        # focal length, millimeters
SENSOR_MM = 23.5       # sensor size, millimeters
W_PX = 1920            # image width, pixels
H_PX = 1280            # image height, pixels

# Matcher settings
PROCESS_DIM = 1024     # size the matcher resizes images to for processing
|
|
|
|
def main():
    """Run the visual-odometry pipeline on one image pair.

    Steps: initialize the matcher and geolocator, match the two configured
    frames, convert the detected pixel shift into a geographic offset from
    the known start position, print the result, and plot the matches.

    Returns early (with a message) if too few matches are found or if the
    images cannot be reloaded for visualization.
    """
    # 1. SETUP
    print("1. Initializing Systems...")
    matcher = LightGlueMatcher(MODEL_PATH, max_dimension=PROCESS_DIM)
    geo = GeoTracker(FOCAL_MM, SENSOR_MM, W_PX, H_PX)

    # 2. MATCHING
    print("2. Matching Images...")
    start_t = time.time()

    # The matcher returns the fundamental matrix plus matched keypoint
    # arrays for each image; it handles loading and scaling internally.
    F, kpts0, kpts1 = matcher.match(IMAGE_0_PATH, IMAGE_1_PATH)

    dt = time.time() - start_t

    # The matcher returns empty arrays when fewer than 8 matches survive.
    if len(kpts0) == 0:
        print("[!] Not enough matches found.")
        return

    print(f" -> Success! Found {len(kpts0)} matches in {dt:.2f}s")

    # 3. GEOLOCATION CALCULATION
    print("3. Calculating Coordinates...")
    nav_result = geo.calculate_movement(
        kpts0, kpts1,
        altitude=ALTITUDE,
        yaw=HEADING,
        start_lat=START_LAT,
        start_lon=START_LON
    )

    # 4. REPORTING
    title = "Calculation Failed"

    if nav_result:
        print("\n" + "="*45)
        print(" 🛰️ VISUAL ODOMETRY RESULT")
        print("="*45)
        print(f" Start Pos: {START_LAT:.6f}, {START_LON:.6f}")
        print("-" * 45)
        print(f" Detected Shift: North {nav_result['move_north_m']:.2f} m")
        print(f"                 East  {nav_result['move_east_m']:.2f} m")
        print("-" * 45)
        print(f" NEW LATITUDE:  {nav_result['new_lat']:.8f}")   # <--- RESULT
        print(f" NEW LONGITUDE: {nav_result['new_lon']:.8f}")   # <--- RESULT
        print("="*45 + "\n")

        title = (f"Shift: N {nav_result['move_north_m']:.1f}m, E {nav_result['move_east_m']:.1f}m\n"
                 f"Lat: {nav_result['new_lat']:.6f}, Lon: {nav_result['new_lon']:.6f}")
    else:
        print("[!] Math failed (likely bad geometry or points at infinity).")

    # 5. VISUALIZATION
    # Reload the images ourselves; the matcher does not return them.
    img0 = cv2.imread(IMAGE_0_PATH)
    img1 = cv2.imread(IMAGE_1_PATH)
    # cv2.imread signals failure by returning None instead of raising --
    # fail with a clear message rather than a cryptic cvtColor error.
    if img0 is None or img1 is None:
        print("[!] Could not reload images for visualization.")
        return
    # Convert BGR (OpenCV) to RGB (matplotlib).
    img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)

    viz2d.plot_images([img0, img1])
    viz2d.plot_matches(kpts0, kpts1, color='lime')
    plt.suptitle(title, fontsize=14, backgroundcolor='white')
    plt.show()
|
# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
    main()