Add initial AiEngineGimbalControl class

This commit is contained in:
Your Name
2024-07-03 17:03:00 +03:00
parent 2d94fd576f
commit df6352fe92
9 changed files with 140 additions and 62 deletions
+33 -44
View File
@@ -4,6 +4,9 @@
#include <opencv2/highgui.hpp>
#include <iomanip>
#include "aiengineinferenceopi5.h"
#include "file_utils.h"
#include "image_drawing.h"
AiEngineInferenceOpi5::AiEngineInferenceOpi5(QString modelPath, QObject *parent)
: AiEngineInference{modelPath, parent}
@@ -28,7 +31,7 @@ AiEngineInferenceOpi5::~AiEngineInferenceOpi5()
}
image_buffer_t convertCV2FrameToImageBuffer(const cv::Mat& bgrFrame)
image_buffer_t AiEngineInferenceOpi5::convertCV2FrameToImageBuffer(const cv::Mat& bgrFrame)
{
// Convert BGR to RGB
cv::Mat rgbFrame;
@@ -50,7 +53,7 @@ image_buffer_t convertCV2FrameToImageBuffer(const cv::Mat& bgrFrame)
}
void freeImageBuffer(image_buffer_t& imgBuffer)
void AiEngineInferenceOpi5::freeImageBuffer(image_buffer_t& imgBuffer)
{
if (imgBuffer.virt_addr) {
delete[] imgBuffer.virt_addr;
@@ -59,42 +62,25 @@ void freeImageBuffer(image_buffer_t& imgBuffer)
}
cv::Mat resizeAndCenterImage(const cv::Mat& src, int targetWidth, int targetHeight)
cv::Mat AiEngineInferenceOpi5::resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame)
{
// Calculate the scaling factor and size
int originalWidth = src.cols;
int originalHeight = src.rows;
double aspectRatio = static_cast<double>(originalWidth) / originalHeight;
int newWidth, newHeight;
// Resize input frame to half size
cv::Mat resizedFrame;
cv::resize(inputFrame, resizedFrame, cv::Size(), 0.5, 0.5);
if (originalWidth > originalHeight) {
newWidth = targetWidth;
newHeight = static_cast<int>(targetWidth / aspectRatio);
}
else {
newHeight = targetHeight;
newWidth = static_cast<int>(targetHeight * aspectRatio);
}
// Create a 640x640 frame to place the resized frame
cv::Mat outputFrame = cv::Mat::zeros(640, 640, inputFrame.type());
// Resize the image
cv::Mat resizedImage;
cv::resize(src, resizedImage, cv::Size(newWidth, newHeight));
// Copy the resized frame to the top-left corner of the output frame
cv::Rect roi(0, 0, resizedFrame.cols, resizedFrame.rows);
resizedFrame.copyTo(outputFrame(roi));
// Create a black image of target size
cv::Mat outputImage = cv::Mat::zeros(targetHeight, targetWidth, src.type());
// Calculate position to center the image
int top = (targetHeight - newHeight) / 2;
int left = (targetWidth - newWidth) / 2;
// Copy resized image into the center of the black image
resizedImage.copyTo(outputImage(cv::Rect(left, top, newWidth, newHeight)));
return outputImage;
return outputFrame;
}
void drawObjects(cv::Mat& image, const object_detect_result_list& result_list) {
void AiEngineInferenceOpi5::drawObjects(cv::Mat& image, const object_detect_result_list& result_list)
{
for (int i = 0; i < result_list.count; i++) {
const object_detect_result& result = result_list.results[i];
@@ -105,7 +91,6 @@ void drawObjects(cv::Mat& image, const object_detect_result_list& result_list) {
cv::rectangle(image, cv::Point(left, top), cv::Point(right, bottom), cv::Scalar(255, 0, 0), 2);
fprintf(stderr, "TUOMAS OMA. ID=%d String=%s\n", result.cls_id, coco_cls_to_name(result.cls_id));
// Text
char c_text[256];
sprintf(c_text, "%s %.1f%%", coco_cls_to_name(result.cls_id), result.prop * 100);
@@ -117,22 +102,32 @@ void drawObjects(cv::Mat& image, const object_detect_result_list& result_list) {
void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
{
qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();
//qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();
mActive = true;
cv::Mat scaledFrame = resizeAndCenterImage(frame, 640, 640);
cv::Mat scaledFrame = resizeToHalfAndAssigntoTopLeft640x640(frame);
image_buffer_t imgBuffer = convertCV2FrameToImageBuffer(scaledFrame);
object_detect_result_list od_results;
int ret = inference_yolov8_model(&mRrknnAppCtx, &imgBuffer, &od_results);
if (ret != 0) {
qDebug() << "inference_yolov8_model() failure! ret: " << ret;
return;
}
AiEngineInferenceResult result;
for (int i = 0; i < od_results.count; i++) {
object_detect_result *det_result = &(od_results.results[i]);
fprintf(stderr, "TUOMAS RKNN. ID=%d String=%s\n", det_result->cls_id, coco_cls_to_name(det_result->cls_id));
AiEngineObject object;
object.classId = det_result->cls_id;
object.propability = det_result->prop;
object.rectangle.top = det_result->box.top;
object.rectangle.left = det_result->box.left;
object.rectangle.bottom = det_result->box.bottom;
object.rectangle.right = det_result->box.right;
result.objects.append(object);
}
/*
@@ -154,13 +149,6 @@ void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
}
*/
// Testing
//write_image(filename.c_str(), &imgBuffer);
drawObjects(scaledFrame, od_results);
freeImageBuffer(imgBuffer);
/*
static int imageNum = 0;
std::stringstream ss;
@@ -170,9 +158,10 @@ void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
cv::imwrite(filename, scaledFrame);
*/
AiEngineInferenceResult result;
drawObjects(scaledFrame, od_results);
freeImageBuffer(imgBuffer);
result.frame = scaledFrame.clone();
result.objects = od_results.count;
emit resultsReady(result);
mActive = false;
@@ -4,8 +4,6 @@
#include "aiengineinference.h"
#include "yolov8.h"
#include "image_utils.h"
#include "file_utils.h"
#include "image_drawing.h"
class AiEngineInferenceOpi5 : public AiEngineInference
{
@@ -18,5 +16,10 @@ public slots:
void performInferenceSlot(cv::Mat frame) override;
private:
image_buffer_t convertCV2FrameToImageBuffer(const cv::Mat& bgrFrame);
void freeImageBuffer(image_buffer_t& imgBuffer);
cv::Mat resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame);
void drawObjects(cv::Mat& image, const object_detect_result_list& result_list);
rknn_app_context_t mRrknnAppCtx;
};