mirror of
https://github.com/azaion/autopilot.git
synced 2026-04-22 22:26:35 +00:00
d4779b1bb0
- added printing of inference FPS - simple AI test bench which can be used to compare models
178 lines · 5.8 KiB · C++
#include "aiengineinferenceopi5.h"

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <iomanip>

#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

#include <QDebug>
#include <QThread>

#include "file_utils.h"
#include "image_drawing.h"
|
|
|
|
|
|
// Construct the Orange Pi 5 (RKNN) inference backend.
// Only forwards the model path and parent to the base class; the actual
// model/NPU setup is deferred to initialize().
AiEngineInferenceOpi5::AiEngineInferenceOpi5(QString modelPath, QObject *parent)
    : AiEngineInference{modelPath, parent}
{
    // Trace construction with the stored model path (mModelPath set by the base class).
    qDebug() << "AiEngineInferenceOpi5() test mModelPath=" << mModelPath;
}
|
|
|
|
|
|
void AiEngineInferenceOpi5::initialize(int number)
|
|
{
|
|
mNumber = number;
|
|
|
|
memset(&mRrknnAppCtx0, 0, sizeof(rknn_app_context_t));
|
|
init_post_process();
|
|
|
|
int ret = init_yolov8_model(mModelPath.toLocal8Bit(), &mRrknnAppCtx0);
|
|
if (ret != 0) {
|
|
qDebug() << "init_yolov8_model() failure! ret: " << ret << "modelPath = " << mModelPath << "number:" << number;
|
|
return;
|
|
}
|
|
}
|
|
|
|
|
|
// Release post-processing resources and the RKNN model context.
// NOTE(review): call order mirrors initialize() (post-process, then model);
// assumed the two C-API teardowns are independent -- confirm before reordering.
AiEngineInferenceOpi5::~AiEngineInferenceOpi5()
{
    deinit_post_process();
    release_yolov8_model(&mRrknnAppCtx0);
}
|
|
|
|
|
|
// Convert an OpenCV BGR frame into an RKNN image_buffer_t (RGB888, deep copy).
// The returned buffer owns a heap allocation in virt_addr; the caller MUST
// release it with freeImageBuffer().
image_buffer_t AiEngineInferenceOpi5::convertCV2FrameToImageBuffer(const cv::Mat& bgrFrame)
{
    // Convert BGR (OpenCV default) to the RGB layout the model expects.
    cv::Mat rgbFrame;
    cv::cvtColor(bgrFrame, rgbFrame, cv::COLOR_BGR2RGB);

    // The single memcpy below assumes row data is contiguous. cvtColor's
    // freshly allocated output normally is, but make the assumption explicit
    // so a padded/ROI Mat can never corrupt the copy.
    if (!rgbFrame.isContinuous()) {
        rgbFrame = rgbFrame.clone();
    }

    image_buffer_t imgBuffer;
    memset(&imgBuffer, 0, sizeof(imgBuffer));
    imgBuffer.width = rgbFrame.cols;
    imgBuffer.height = rgbFrame.rows;
    imgBuffer.width_stride = static_cast<int>(rgbFrame.step);  // bytes per row (explicit narrowing)
    imgBuffer.height_stride = rgbFrame.rows;
    imgBuffer.format = IMAGE_FORMAT_RGB888;
    imgBuffer.size = rgbFrame.total() * rgbFrame.elemSize();

    // Deep-copy the pixels: the cv::Mat may be released before the NPU reads them.
    imgBuffer.virt_addr = new unsigned char[imgBuffer.size];
    std::memcpy(imgBuffer.virt_addr, rgbFrame.data, imgBuffer.size);

    return imgBuffer;
}
|
|
|
|
|
|
// Release the pixel data allocated by convertCV2FrameToImageBuffer().
// Nulls virt_addr afterwards, so calling this twice on the same buffer is safe.
void AiEngineInferenceOpi5::freeImageBuffer(image_buffer_t& imgBuffer)
{
    if (imgBuffer.virt_addr == nullptr) {
        return;
    }
    delete[] imgBuffer.virt_addr;
    imgBuffer.virt_addr = nullptr;
}
|
|
|
|
|
|
// Letterbox `inputFrame` into a 640x640 canvas: scale it down preserving the
// aspect ratio, place the result in the top-left corner and pad the remainder
// with black. (The "resizeToHalf" in the name is historical -- the current
// implementation fits to 640x640, it does not halve the frame.)
cv::Mat AiEngineInferenceOpi5::resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame)
{
    const int targetWidth = 640;
    const int targetHeight = 640;

    // Guard: an empty frame would make the aspect-ratio division below
    // undefined and cv::resize would throw. Return an all-black canvas.
    // (CV_8UC3 assumed -- callers feed 3-channel BGR frames.)
    if (inputFrame.empty()) {
        return cv::Mat::zeros(targetHeight, targetWidth, CV_8UC3);
    }

    const float aspectRatio = static_cast<float>(inputFrame.cols) / static_cast<float>(inputFrame.rows);

    // Fit to the target width first; if the scaled height overflows the
    // canvas, fit to the target height instead.
    int newWidth = targetWidth;
    int newHeight = static_cast<int>(targetWidth / aspectRatio);
    if (newHeight > targetHeight) {
        newHeight = targetHeight;
        newWidth = static_cast<int>(targetHeight * aspectRatio);
    }

    // Extreme aspect ratios truncate a dimension to 0; cv::resize needs >= 1 px.
    newWidth = std::max(newWidth, 1);
    newHeight = std::max(newHeight, 1);

    cv::Mat resizedFrame;
    cv::resize(inputFrame, resizedFrame, cv::Size(newWidth, newHeight));

    // Black canvas, resized frame copied into the top-left corner.
    cv::Mat outputFrame = cv::Mat::zeros(targetHeight, targetWidth, inputFrame.type());
    cv::Rect roi(cv::Point(0, 0), resizedFrame.size());
    resizedFrame.copyTo(outputFrame(roi));

    return outputFrame;
}
|
|
|
|
|
|
// Draw every detection in `result_list` onto `image` in place: a blue box plus
// a red "<class name> <confidence>%" label above its top-left corner.
void AiEngineInferenceOpi5::drawObjects(cv::Mat& image, const object_detect_result_list& result_list)
{
    for (int i = 0; i < result_list.count; i++) {
        const object_detect_result& result = result_list.results[i];

        // The model can emit class ids beyond our label table; warn here and
        // wrap the id into range at the lookup below.
        if (result.cls_id >= mClassNames.size()) {
            qDebug() << "Class id >= mClassNames.size() Reducing it.";
        }

        const int left = result.box.left;
        const int top = result.box.top;
        const int right = result.box.right;
        const int bottom = result.box.bottom;

        // Bounding box in blue (BGR order), 2 px thick.
        cv::rectangle(image, cv::Point(left, top), cv::Point(right, bottom), cv::Scalar(255, 0, 0), 2);

        // Label text. snprintf instead of sprintf: class names come from an
        // external label file, so bound the write to the buffer size.
        char c_text[256];
        snprintf(c_text, sizeof(c_text), "%s %d%%",
                 mClassNames[result.cls_id % mClassNames.size()].toStdString().c_str(),
                 (int)(round(result.prop * 100)));

        cv::Point textOrg(left, top - 5);
        // NOTE(review): fontScale is tied to the confidence (result.prop), so
        // low-confidence labels render tiny -- confirm this is intentional;
        // a fixed scale may be wanted.
        cv::putText(image, std::string(c_text), textOrg, cv::FONT_HERSHEY_COMPLEX, result.prop, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
    }
}
|
|
|
|
|
|
// Qt slot: run one YOLOv8 inference pass on `frame`.
// Letterboxes the frame to 640x640, converts it to an RKNN image buffer, runs
// the model, and -- when at least one object is detected -- emits
// resultsReady() with the detections and an annotated copy of the scaled frame.
// mActive is raised for the duration of the call so callers can tell the
// engine is busy (assumed single-threaded slot delivery -- the flag is not
// synchronized; confirm if frames can arrive from multiple threads).
void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
{
    //qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();

    mActive = true;

    // Preprocess: letterbox to the model's 640x640 input, then deep-copy into
    // an RKNN buffer (we own imgBuffer.virt_addr until freeImageBuffer()).
    cv::Mat scaledFrame = resizeToHalfAndAssigntoTopLeft640x640(frame);
    image_buffer_t imgBuffer = convertCV2FrameToImageBuffer(scaledFrame);

    object_detect_result_list od_results;
    int ret = inference_yolov8_model(&mRrknnAppCtx0, &imgBuffer, &od_results, mNumber);
    // The buffer was copied for the NPU call; release it on every path.
    freeImageBuffer(imgBuffer);
    if (ret != 0) {
        qDebug() << "AiEngineInferenceOpi5::performInferenceSlot() failure! ret: " << ret;
        mActive = false;
        return;
    }

    // Only emit a result when the model detected something.
    if (od_results.count > 0) {
        AiEngineInferenceResult result;
        // Copy each raw detection into the Qt-facing result structure.
        for (int i = 0; i < od_results.count; i++) {
            object_detect_result *det_result = &(od_results.results[i]);
            qDebug() << "TUOMAS box:" << det_result->box.top << det_result->box.left << det_result->box.bottom << det_result->box.right;

            AiEngineObject object;
            object.classId = det_result->cls_id;
            object.propability = det_result->prop;
            object.rectangle.top = det_result->box.top;
            object.rectangle.left = det_result->box.left;
            object.rectangle.bottom = det_result->box.bottom;
            object.rectangle.right = det_result->box.right;
            result.objects.append(object);
        }

        // Annotate the scaled (not the original) frame and ship a private copy,
        // since scaledFrame goes out of scope when this slot returns.
        drawObjects(scaledFrame, od_results);
        result.frame = scaledFrame.clone();
        emit resultsReady(result);
    }

    mActive = false;
}
|