Files
autopilot/misc/camera/a8/utilsTargetLocation.cpp
T
Nffj84 db2652d979 Added functionality to calculate target location.
Added functionality to capture camera frame from RTSP stream.
Refactored code.
Fixed some minor issues.
2024-06-18 17:29:40 +03:00

285 lines
11 KiB
C++

#include "utilsTargetLocation.h"
#include <QDebug>
#include <QImage>
#include "serialCommand.h"
#include "serialResponse.h"
#define USE_OPENCV
//#define USE_FFMPEG
#ifdef USE_OPENCV
#include <opencv2/opencv.hpp>
#endif
#ifdef USE_FFMPEG
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#endif
// Constructs the target-location helper around an existing serial link and
// command table. Neither pointer is owned by this object, and either may be
// null; null pointers are only detected later, when getLocation() is called.
UtilsTargetLocation::UtilsTargetLocation(SerialPort *serial, SerialCommand *command)
: mSerial(serial)
, mCommand(command)
{
// ... Optional: Additional initialization logic here
}
// Computes the GPS position (latitude, longitude, altitude) of a target seen
// by the camera.
//
// Parameters:
//   altitude/latitude/lognitude - the drone's own GPS fix (note: "lognitude"
//       spelling kept to match the declaration; it is the longitude in degrees).
//   yaw/pitch/roll - drone attitude in degrees (roll is currently unused by
//       the downstream math).
//   targetTrueSize  - real-world size of the target in meters.
//   targetPixelSize - apparent size of the target in pixels.
//
// Returns the estimated target GPSData. If the serial link or command table
// is unavailable the camera cannot be queried, so the drone's own position is
// returned as a best-effort fallback instead of dereferencing a null pointer.
GPSData UtilsTargetLocation::getLocation(float altitude, float latitude, float lognitude, float yaw, float pitch, float roll, float targetTrueSize, uint16_t targetPixelSize)
{
    GPSData gpsData = {latitude, lognitude, altitude};
    // Bail out early: getCameraData() dereferences both pointers, so
    // continuing after the warning (as the old code did) would crash.
    if (mSerial == nullptr) {
        qWarning() << "Cannot get geo location: Serial connection not available.";
        return gpsData;
    }
    if (mCommand == nullptr) {
        qWarning() << "Cannot get geo location: Commands not available.";
        return gpsData;
    }
    // Capture image from rtsp stream
    captureImageFromStream();
    // TODO?
    // Send image to AI for target identification and/or target choosing
    // From the drone and camera
    CameraData cameraData = getCameraData();
    DroneData droneData = {gpsData, yaw, pitch, roll}; // GPS (latitude, longitude, altitude) and heading
    // Calculate altitude and distance to the target
    float targetDistance = calculateTargetDistance(targetTrueSize, targetPixelSize, cameraData.width, degreesToRadians(cameraData.fow));
    // Bearing = drone heading + camera pan, normalized into [0, 360).
    // The double fmod keeps the result non-negative even for negative yaw sums.
    float targetBearing = fmod(fmod(droneData.yaw + cameraData.yaw, 360.0f) + 360.0f, 360.0f);
    // Calculate the GPS location of the target
    return calculateTargetLocation(droneData, cameraData, targetDistance, targetBearing);
}
// Queries the camera over the serial link for its current state and returns
// it as a CameraData {height, width, pitch, yaw, fov}.
//
// Three commands are issued in sequence: zoom (to derive the effective
// horizontal field of view), attitude (gimbal yaw/pitch in degrees), and
// codec specs (frame resolution in pixels). If the serial link or command
// table is unavailable, the compile-time defaults are returned instead of
// dereferencing a null pointer.
CameraData UtilsTargetLocation::getCameraData()
{
    uint16_t height = CAMERA_RESOLUTION_HEIGHT;
    uint16_t width = CAMERA_RESOLUTION_WIDTH;
    float pitch = 0;
    float yaw = 0;
    float fov = CAMERA_FIELD_OF_VIEW_HORIZONTAL;
    if (mSerial == nullptr || mCommand == nullptr) {
        qWarning() << "Cannot query camera: serial connection or commands not available, using defaults.";
        return {height, width, pitch, yaw, fov};
    }
    QByteArray response;
    QHash<QString, QVariant> responseValues;
    // Current zoom factor; the effective FOV narrows proportionally to zoom.
    mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_CURRENT_ZOOM)); // Field of view in degrees
    response = mSerial->readResponse();
    responseValues = SerialResponse::getResponceValues(response);
    SerialResponse::printResponse(response);
    // A zero/unparsable zoom reading is treated as 1x to avoid division by zero.
    const float zoom = responseValues["zoom"].toFloat();
    fov = CAMERA_FIELD_OF_VIEW_HORIZONTAL / (zoom == 0.0f ? 1.0f : zoom);
    mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_ATTITUDE_DATA)); // Yaw and pitch in degrees
    response = mSerial->readResponse();
    responseValues = SerialResponse::getResponceValues(response);
    SerialResponse::printResponse(response);
    yaw = responseValues["yaw"].toFloat();
    pitch = responseValues["pitch"].toFloat();
    mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_CAMERA_CODEC_SPECS)); // Camera resolution in pixels
    response = mSerial->readResponse();
    responseValues = SerialResponse::getResponceValues(response);
    SerialResponse::printResponse(response);
    height = responseValues["height"].toUInt();
    width = responseValues["width"].toUInt();
    return {height, width, pitch, yaw, fov};
}
// Estimates the distance to the target (same unit as targetTrueSize, meters)
// using the pinhole-camera model: focalLength = (imageWidth/2) / tan(fov/2),
// distance = trueSize * focalLength / pixelSize.
//
// Parameters:
//   targetTrueSize  - real-world size of the target.
//   targetPixelSize - apparent size of the target in pixels (must be > 0).
//   imageWidth      - horizontal resolution of the frame in pixels.
//   fov             - horizontal field of view in RADIANS (caller converts).
//
// Returns 0.0f when the inputs are degenerate (zero pixel size or zero FOV),
// instead of producing inf as the previous division-by-zero did.
float UtilsTargetLocation::calculateTargetDistance(float targetTrueSize, uint16_t targetPixelSize, uint16_t imageWidth, float fov)
{
    if (targetPixelSize == 0) {
        // A zero pixel size means no target was measured; inf would poison
        // the downstream great-circle math.
        return 0.0f;
    }
    const float halfFovTan = tan(fov / 2.0f);
    if (halfFovTan == 0.0f) {
        return 0.0f;
    }
    // 2.0f keeps the half-width exact for odd resolutions (the previous
    // integer division truncated).
    const float focalLength = (imageWidth / 2.0f) / halfFovTan;
    return (targetTrueSize * focalLength) / targetPixelSize;
}
// Converts an angle from degrees to radians (float in, float out; the
// intermediate math is carried out in double precision via M_PI).
float UtilsTargetLocation::degreesToRadians(float degrees)
{
    const double asRadians = degrees * M_PI / 180.0f;
    return asRadians;
}
// Computes the target's GPS position from the drone position, the estimated
// slant distance to the target, and the bearing, using the standard
// great-circle "destination point given distance and bearing" formulae on a
// spherical Earth.
//
// Parameters:
//   drone    - drone GPS fix (degrees / meters) and attitude.
//   camera   - camera state; only camera.pitch (degrees) is used, to project
//              the slant distance into a vertical offset.
//   distance - slant distance to the target in meters.
//   bearing  - bearing from the drone to the target in degrees.
//
// Returns the target GPSData in degrees / meters.
GPSData UtilsTargetLocation::calculateTargetLocation(DroneData drone, CameraData camera, float distance, float bearing)
{
    constexpr float R = 6371000.0; // Earth radius in meters
    float bearingRad = degreesToRadians(bearing);
    float latRad = degreesToRadians(drone.gps.latitude);
    float lonRad = degreesToRadians(drone.gps.longitude);
    // Destination point on the sphere.
    float newLatRad = asin(sin(latRad) * cos(distance / R) + cos(latRad) * sin(distance / R) * cos(bearingRad));
    float newLonRad = lonRad + atan2(sin(bearingRad) * sin(distance / R) * cos(latRad), cos(distance / R) - sin(latRad) * sin(newLatRad));
    // Vertical offset of the slant range. distance*sin(angle) is the exact
    // form of the previous (distance*cos(angle))*tan(angle), but stays finite
    // at pitch = +/-90 degrees. Uses the shared helper instead of inline M_PI.
    float angleRad = degreesToRadians(camera.pitch);
    float newAltitude = drone.gps.altitude + distance * sin(angleRad);
    GPSData newLocation;
    newLocation.latitude = newLatRad * 180.0f / M_PI;
    // Normalize longitude into [-180, 180): the raw atan2 sum can step past
    // the antimeridian when the drone is near +/-180 degrees.
    newLocation.longitude = fmod(newLonRad * 180.0f / M_PI + 540.0f, 360.0f) - 180.0f;
    newLocation.altitude = newAltitude;
    return newLocation;
}
// Captures a single frame from the RTSP stream at RTSP_ADDRESS and saves it
// to disk as a JPEG ("output_opencv.jpg" or "output_ffmpeg.jpg" depending on
// the backend selected at compile time). Returns nothing; failures are
// logged. The first frames delivered by the stream are often corrupted, so
// the OpenCV path deliberately skips the first 10 before saving.
void UtilsTargetLocation::captureImageFromStream()
{
#ifdef USE_OPENCV
    cv::VideoCapture cap;
    cap.open(RTSP_ADDRESS);
    if (!cap.isOpened()) {
        qWarning() << "Error: Could not open RTSP stream";
        return;
    }
    // Count every loop iteration (not just valid frames, as the old code
    // did): a stream that never yields a frame must still hit the give-up
    // limit instead of looping forever. int, not uint8_t, so the counter
    // cannot wrap at 255.
    int attempts = 0;
    while (true) {
        // Check the give-up limit FIRST. The old code tested it in an
        // `else if` after `> 10`, which made it unreachable (any count > 100
        // is also > 10), so repeated imwrite failures looped forever.
        if (++attempts > 100) {
            qInfo() << "Error: Tried 100 times and still fails, giving up";
            break;
        }
        cv::Mat frame;
        if (!(cap.grab() && cap.retrieve(frame))) {
            qInfo() << "Warning: Could not grab or retrieve frame, retrying...";
            continue;
        }
        if (frame.empty() || frame.total() == 0) {
            qInfo() << "Warning: Invalid frame captured, retrying...";
            continue;
        }
        // Normalize to 3-channel BGR before encoding.
        if (frame.channels() == 1) {
            qInfo() << "Captured frame is grayscale, converting to BGR";
            cv::cvtColor(frame, frame, cv::COLOR_GRAY2BGR);
        } else if (frame.channels() == 4) {
            qInfo() << "Captured frame is RGBA, converting to BGR";
            cv::cvtColor(frame, frame, cv::COLOR_RGBA2BGR);
        }
        // Hack to capture proper image: skip the first 10 frames, which for
        // some reason arrive corrupted.
        if (attempts > 10) {
            std::string filename = "output_opencv.jpg";
            if (cv::imwrite(filename, frame)) {
                qInfo() << "Image captured and saved as" << QString::fromStdString(filename);
                break;
            }
            qInfo() << "Error: Failed to save the captured image";
        }
    }
    // Release the video capture object
    cap.release();
#endif
#ifdef USE_FFMPEG
    // Initialize FFmpeg networking
    avformat_network_init();
    // Open RTSP stream
    AVFormatContext *formatContext = avformat_alloc_context();
    if (avformat_open_input(&formatContext, RTSP_ADDRESS, NULL, NULL) != 0) {
        qDebug() << "Error: Couldn't open RTSP stream";
        return;
    }
    // Find stream info (every early return below now closes the input, which
    // the old code leaked).
    if (avformat_find_stream_info(formatContext, NULL) < 0) {
        qDebug() << "Error: Couldn't find stream info";
        avformat_close_input(&formatContext);
        return;
    }
    // Find the first video stream
    int videoStreamIndex = -1;
    for (uint i = 0; i < formatContext->nb_streams; i++) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex = (int) i;
            break;
        }
    }
    if (videoStreamIndex == -1) {
        qDebug() << "Error: Couldn't find video stream";
        avformat_close_input(&formatContext);
        return;
    }
    // Initialize video decoder
    AVCodecParameters *codecParameters = formatContext->streams[videoStreamIndex]->codecpar;
    AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
    if (codec == NULL) {
        qDebug() << "Error: Unsupported codec";
        avformat_close_input(&formatContext);
        return;
    }
    AVCodecContext *codecContext = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(codecContext, codecParameters);
    if (avcodec_open2(codecContext, codec, NULL) < 0) {
        qDebug() << "Error: Couldn't open codec";
        avcodec_free_context(&codecContext);
        avformat_close_input(&formatContext);
        return;
    }
    // Allocate frame
    AVFrame *frame = av_frame_alloc();
    bool shouldBreak = false;
    // Read packets until one full video frame has been decoded and saved
    AVPacket packet;
    while (shouldBreak == false && av_read_frame(formatContext, &packet) >= 0) {
        if (packet.stream_index == videoStreamIndex && avcodec_send_packet(codecContext, &packet) >= 0) {
            while (avcodec_receive_frame(codecContext, frame) >= 0) {
                if (codecParameters->format != AV_PIX_FMT_RGB24) {
                    // Convert the decoded frame to RGB24 for QImage.
                    AVFrame *rgbFrame = av_frame_alloc();
                    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, codecParameters->width, codecParameters->height, 1);
                    uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
                    if (av_image_fill_arrays(rgbFrame->data, rgbFrame->linesize, buffer, AV_PIX_FMT_RGB24, codecParameters->width, codecParameters->height, 1) > 0) {
                        // Get the AVPixelFormat enum value directly from the
                        // descriptor (the old code had this comment split
                        // mid-statement, which did not compile).
                        AVPixelFormat pixelFormat = (AVPixelFormat) codecParameters->format;
                        const AVPixFmtDescriptor *pixDesc = av_pix_fmt_desc_get(pixelFormat);
                        if (pixDesc == NULL) {
                            // Log and keep reading instead of returning with
                            // every FFmpeg object leaked, as before.
                            qDebug() << "Error: Unsupported pixel format";
                        } else {
                            // Create the sws context, convert, and release it
                            struct SwsContext *swsContext = sws_getContext(codecParameters->width, codecParameters->height, pixelFormat, codecParameters->width, codecParameters->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
                            sws_scale(swsContext, frame->data, frame->linesize, 0, codecParameters->height, rgbFrame->data, rgbFrame->linesize);
                            sws_freeContext(swsContext);
                            // Save frame as JPEG (QImage wraps buffer, so save
                            // before the buffer is freed below)
                            QImage image(rgbFrame->data[0], codecParameters->width, codecParameters->height, QImage::Format_RGB888);
                            image.save("output_ffmpeg.jpg");
                            shouldBreak = true;
                        }
                    }
                    // Freed on every path — the old code leaked both when
                    // av_image_fill_arrays failed.
                    av_free(buffer);
                    av_frame_free(&rgbFrame);
                    if (shouldBreak) {
                        break;
                    }
                } else {
                    // Already RGB24: save frame as JPEG directly
                    QImage image(frame->data[0], codecParameters->width, codecParameters->height, QImage::Format_RGB888);
                    image.save("output_ffmpeg.jpg");
                    shouldBreak = true;
                    break;
                }
            }
        }
        av_packet_unref(&packet);
    }
    // Clean up
    avformat_close_input(&formatContext);
    avcodec_free_context(&codecContext);
    av_frame_free(&frame);
#endif
}