Refactored a8 code and added remote testing app a8_remote.

This commit is contained in:
Nffj84
2024-07-02 11:31:12 +03:00
parent c03d477c45
commit b39e58dbc1
499 changed files with 1209 additions and 463 deletions
+28 -215
@@ -1,48 +1,12 @@
#include "utilsTargetLocation.h"
#include "utilsTargetLocation.hpp"
#include <QDebug>
#include <QImage>
#include <cmath>
#include "serialCommand.h"
#include "serialResponse.h"
#define USE_OPENCV
//#define USE_FFMPEG
#ifdef USE_OPENCV
#include "config.hpp"
#include "defines.hpp"
#include <opencv2/opencv.hpp>
#endif
#ifdef USE_FFMPEG
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#endif
UtilsTargetLocation::UtilsTargetLocation(SerialPort *serial, SerialCommand *command)
: mSerial(serial)
, mCommand(command)
{
// ... Optional: Additional initialization logic here
}
GPSData UtilsTargetLocation::getLocation(float altitude, float latitude, float longitude, float yaw, float pitch, float roll, float targetTrueSize, uint16_t targetPixelSize)
{
if (mSerial == nullptr) {
qWarning() << "Cannot get geo location: Serial connection not available.";
return {};
}
if (mCommand == nullptr) {
qWarning() << "Cannot get geo location: Commands not available.";
return {};
}
// Capture image from rtsp stream
captureImageFromStream();
// TODO: Send image to AI for target identification and/or target selection
// From the drone and camera
CameraData cameraData = getCameraData();
GPSData gpsData = {latitude, longitude, altitude};
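The hunk cuts off before the range computation, but the targetTrueSize/targetPixelSize pair points at a pinhole-model estimate; a minimal sketch under that assumption (estimateDistance and its parameters are hypothetical, not part of this commit):
// Hypothetical sketch: range from apparent target size under a pinhole model
static float estimateDistance(float targetTrueSize, uint16_t targetPixelSize, uint16_t imageWidthPx, float hfovDeg)
{
// Focal length in pixels, derived from the horizontal field of view
float focalPx = (imageWidthPx / 2.0f) / tanf((hfovDeg / 2.0f) * (float) M_PI / 180.0f);
// Similar triangles: distance = true size * focal length / apparent pixel size
return targetTrueSize * focalPx / targetPixelSize;
}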
@@ -60,33 +24,12 @@ GPSData UtilsTargetLocation::getLocation(float altitude, float latitude, float l
CameraData UtilsTargetLocation::getCameraData()
{
uint16_t height = CAMERA_RESOLUTION_HEIGHT;
uint16_t width = CAMERA_RESOLUTION_WIDTH;
float pitch = 0;
float yaw = 0;
float fov = CAMERA_FIELD_OF_VIEW_HORIZONTAL;
QByteArray response;
QHash<QString, QVariant> responseValues;
mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_CURRENT_ZOOM)); // Field of view in degrees
response = mSerial->readResponse();
responseValues = SerialResponse::getResponceValues(response);
SerialResponse::printResponse(response);
float zoomFactor = responseValues["zoom"].toFloat();
fov = CAMERA_FIELD_OF_VIEW_HORIZONTAL / (zoomFactor == 0.0f ? 1.0f : zoomFactor);
mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_ATTITUDE_DATA)); // Yaw and pitch in degrees
response = mSerial->readResponse();
responseValues = SerialResponse::getResponceValues(response);
SerialResponse::printResponse(response);
yaw = responseValues["yaw"].toFloat();
pitch = responseValues["pitch"].toFloat();
mSerial->sendCommand(mCommand->getCommandByID(COMMAND_ID::ACQUIRE_CAMERA_CODEC_SPECS)); // Camera resolution in pixels
response = mSerial->readResponse();
responseValues = SerialResponse::getResponceValues(response);
SerialResponse::printResponse(response);
height = responseValues["height"].toUInt();
width = responseValues["width"].toUInt();
uint16_t height = Config::getResolutionHeight();
uint16_t width = Config::getResolutionWidth();
float yaw = Config::getCurrentYaw();
float pitch = Config::getCurrentPitch();
float zoom = Config::getCurrentZoom();
float fov = CAMERA_FIELD_OF_VIEW_HORIZONTAL / zoom;
return {height, width, pitch, yaw, fov};
}
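For intuition, the Config-based rewrite above divides the base field of view by the current zoom factor; a quick illustrative check (the 81-degree base value is made up, not from this commit):
// Illustrative only: a hypothetical 81-degree base horizontal FOV
float baseFov = 81.0f; // stand-in for CAMERA_FIELD_OF_VIEW_HORIZONTAL
float fovAt1x = baseFov / 1.0f; // 81.0 degrees at 1x zoom
float fovAt3x = baseFov / 3.0f; // 27.0 degrees at 3x zoom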
@@ -107,7 +50,7 @@ float UtilsTargetLocation::degreesToRadians(float degrees)
// Function to calculate the new GPS location
GPSData UtilsTargetLocation::calculateTargetLocation(DroneData drone, CameraData camera, float distance, float bearing)
{
constexpr float R = 6371000.0; // Earth radius in meters
constexpr float R = 6371000.0f; // Earth radius in meters
float bearingRad = degreesToRadians(bearing);
float latRad = degreesToRadians(drone.gps.latitude);
@@ -116,7 +59,7 @@ GPSData UtilsTargetLocation::calculateTargetLocation(DroneData drone, CameraData
float newLatRad = asin(sin(latRad) * cos(distance / R) + cos(latRad) * sin(distance / R) * cos(bearingRad));
float newLonRad = lonRad + atan2(sin(bearingRad) * sin(distance / R) * cos(latRad), cos(distance / R) - sin(latRad) * sin(newLatRad));
float angleRad = camera.pitch * M_PI / 180.0;
float angleRad = camera.pitch * M_PI / 180.0f;
float horizontalDistance = distance * cos(angleRad);
float newAltitude = drone.gps.altitude + (horizontalDistance * tan(angleRad));
@@ -128,157 +71,27 @@ GPSData UtilsTargetLocation::calculateTargetLocation(DroneData drone, CameraData
return newLocation;
}
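A numeric sanity check of the destination-point formula above, with illustrative inputs:
// Illustrative check: from lat 60.0 N, lon 24.0 E, bearing 90 degrees (due east), distance 1000 m
// d / R = 1000 / 6371000 = 1.5696e-4 rad
// newLatRad = asin(sin(latRad) * cos(d/R) + cos(latRad) * sin(d/R) * cos(bearingRad)) -> latitude ~unchanged at 60.0
// newLonRad = lonRad + atan2(sin(bearingRad) * sin(d/R) * cos(latRad), cos(d/R) - sin(latRad) * sin(newLatRad))
// -> longitude grows by ~0.018 degrees, i.e. about 1 km east at 60 N where one longitude degree spans ~55.7 km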
void UtilsTargetLocation::captureImageFromStream()
void UtilsTargetLocation::getAnglesToOnScreenTarget(uint16_t targetX, uint16_t targetY, float &resultYaw, float &resultPitch)
{
#ifdef USE_OPENCV
cv::VideoCapture cap;
cap.open(RTSP_ADDRESS);
resultYaw = Config::getCurrentYaw();
resultPitch = Config::getCurrentPitch();
if (!cap.isOpened()) {
qWarning() << "Error: Could not open RTSP stream";
return;
}
// Normalize target pixel location to [-1, 1] range
double normPixelX = (targetX - Config::getResolutionWidth() / 2.0f) / (Config::getResolutionWidth() / 2.0f);
double normPixelY = (targetY - Config::getResolutionHeight() / 2.0f) / (Config::getResolutionHeight() / 2.0f);
// Hack to capture a proper image
// For some reason the first frames get corrupted
uint8_t frameCount = 0;
// Adjust horizontal field of view for zoom
double horizontalFov = CAMERA_FIELD_OF_VIEW_HORIZONTAL * (1.0f + (Config::getCurrentZoom() - 1.0f) / 5.0f);
while (true) {
cv::Mat frame;
// Calculate image plane dimensions based on focal length and aspect ratio
double imagePlaneWidth = 2.0f * CAMERA_FOCAL_LENGTH * tan(horizontalFov / 2.0f * M_PI / 180.0f);
double imagePlaneHeight = imagePlaneWidth / CAMERA_ASPECT_RATIO;
if (cap.grab() && cap.retrieve(frame)) {
if (frame.empty() || frame.total() == 0) {
qInfo() << "Warning: Invalid frame captured, retrying...";
} else {
// Convert the frame to BGR if needed
if (frame.channels() == 1) {
qInfo() << "Captured frame is grayscale, converting to BGR";
cv::cvtColor(frame, frame, cv::COLOR_GRAY2BGR);
} else if (frame.channels() == 4) {
qInfo() << "Captured frame is RGBA, converting to BGR";
cv::cvtColor(frame, frame, cv::COLOR_RGBA2BGR);
}
// Calculate angle offsets based on normalized pixel location and image plane
// dimensions
float turnX = atan2(normPixelX * imagePlaneWidth / 2.0f, CAMERA_FOCAL_LENGTH) * 180.0f / M_PI;
float turnY = atan2(normPixelY * imagePlaneHeight / 2.0f, CAMERA_FOCAL_LENGTH) * 180.0f / M_PI;
frameCount++;
if (frameCount > 100) {
qInfo() << "Error: Tried 100 times and still failed, giving up";
break;
} else if (frameCount > 10) {
// Save the captured frame as an image
std::string filename = "output_opencv.jpg";
if (cv::imwrite(filename, frame)) {
qInfo() << "Image captured and saved as" << QString::fromStdString(filename);
break;
} else {
qInfo() << "Error: Failed to save the captured image";
}
}
}
} else {
qInfo() << "Warning: Could not grab or retrieve frame, retrying...";
}
}
// Release the video capture object
cap.release();
#endif
#ifdef USE_FFMPEG
// Initialize FFmpeg
avformat_network_init();
// Open RTSP stream
AVFormatContext *formatContext = avformat_alloc_context();
if (avformat_open_input(&formatContext, RTSP_ADDRESS, NULL, NULL) != 0) {
qDebug() << "Error: Couldn't open RTSP stream";
return;
}
// Find stream info
if (avformat_find_stream_info(formatContext, NULL) < 0) {
qDebug() << "Error: Couldn't find stream info";
return;
}
// Find video stream
int videoStreamIndex = -1;
for (uint i = 0; i < formatContext->nb_streams; i++) {
if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break;
}
}
if (videoStreamIndex == -1) {
qDebug() << "Error: Couldn't find video stream";
return;
}
// Initialize video decoder
AVCodecParameters *codecParameters = formatContext->streams[videoStreamIndex]->codecpar;
AVCodec *codec = avcodec_find_decoder(codecParameters->codec_id);
AVCodecContext *codecContext = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(codecContext, codecParameters);
avcodec_open2(codecContext, codec, NULL);
// Allocate frame
AVFrame *frame = av_frame_alloc();
bool shouldBreak = false;
// Read frames
AVPacket packet;
while (!shouldBreak && av_read_frame(formatContext, &packet) >= 0) {
if (packet.stream_index == videoStreamIndex) {
if (avcodec_send_packet(codecContext, &packet) >= 0) {
while (avcodec_receive_frame(codecContext, frame) >= 0) {
// Convert frame to RGB if necessary
if (codecParameters->format != AV_PIX_FMT_RGB24) {
AVFrame *rgbFrame = av_frame_alloc();
int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, codecParameters->width, codecParameters->height, 1);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
if (av_image_fill_arrays(rgbFrame->data, rgbFrame->linesize, buffer, AV_PIX_FMT_RGB24, codecParameters->width, codecParameters->height, 1) > 0) {
// Convert format to enum AVPixelFormat
const AVPixFmtDescriptor *pixDesc = av_pix_fmt_desc_get((AVPixelFormat) codecParameters->format);
if (!pixDesc) {
qDebug() << "Error: Unsupported pixel format";
return;
}
// Get the AVPixelFormat enum value directly from the descriptor
AVPixelFormat pixelFormat = (AVPixelFormat) codecParameters->format;
// Create the sws context
struct SwsContext *swsContext = sws_getContext(codecParameters->width, codecParameters->height, pixelFormat, codecParameters->width, codecParameters->height, AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
sws_scale(swsContext, frame->data, frame->linesize, 0, codecParameters->height, rgbFrame->data, rgbFrame->linesize);
sws_freeContext(swsContext);
// Save frame as JPEG
QImage image(rgbFrame->data[0], codecParameters->width, codecParameters->height, rgbFrame->linesize[0], QImage::Format_RGB888);
image.save("output_ffmpeg.jpg");
av_free(buffer);
av_frame_free(&rgbFrame);
shouldBreak = true;
break;
}
} else {
// Save frame as JPEG
QImage image(frame->data[0], codecParameters->width, codecParameters->height, frame->linesize[0], QImage::Format_RGB888);
image.save("output_ffmpeg.jpg");
shouldBreak = true;
break;
}
}
}
}
av_packet_unref(&packet);
}
// Clean up
avformat_close_input(&formatContext);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
#endif
resultYaw -= turnX;
resultPitch -= turnY;
}
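A usage sketch for the new helper; the caller object, stream resolution, and pixel values below are hypothetical:
// Hypothetical caller: convert an operator's click at pixel (1440, 540) on a
// 1920x1080 stream into absolute gimbal angles
float yaw = 0.0f;
float pitch = 0.0f;
utils.getAnglesToOnScreenTarget(1440, 540, yaw, pitch);
// yaw and pitch now hold the current attitude minus the per-axis offsets to the
// clicked pixel; a click at the exact centre (960, 540) returns the current
// yaw and pitch unchanged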