Files
autopilot/misc/rtsp_ai_player/aiengine.cpp
Tuomas Järvinen 022e4a1200 Add compile option to use YOLOv8 ONNX models for testing.
- added a qmake option, yolo_onnx, to use standard YOLOv8 ONNX models. This makes it
  possible to test the gimbal's camera indoors without the real model (a hypothetical
  wiring sketch follows the commit header)
- reduced the confidence threshold in AiEngineInferencevOnnxRuntime from 0.5 to 0.2
  (an illustrative filtering sketch also follows below)
- made printing prettier with ONNX Runtime
- removed an unnecessary cv::Mat::clone()

Type: Improvement
Issue: https://denyspopov.atlassian.net/browse/AZ-39
2024-08-18 17:17:40 +03:00
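
A minimal sketch of how the yolo_onnx option mentioned above could be wired into the
project's .pro file. Only the option name comes from the commit message; the YOLO_ONNX
define and the message() call are assumptions, since the .pro file is not shown here.

# Hypothetical .pro excerpt. Enable with:  qmake "CONFIG+=yolo_onnx"
yolo_onnx {
    DEFINES += YOLO_ONNX    # assumed define name, not confirmed by this commit
    message("Building with plain YOLOv8 ONNX models for testing")
}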

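Similarly, the lowered confidence threshold lives in AiEngineInferencevOnnxRuntime,
which is not shown in this view; the sketch below only illustrates the kind of
filtering step such a threshold typically gates in YOLOv8 post-processing. All names
here are hypothetical.

#include <vector>

// Hypothetical detection record; the real type lives in the inference backend.
struct Detection {
    float confidence;
    // ... bounding box, class id, etc.
};

// Keep only detections at or above the threshold (0.2f after this commit, 0.5f before).
std::vector<Detection> filterByConfidence(const std::vector<Detection> &raw,
                                          float threshold = 0.2f)
{
    std::vector<Detection> kept;
    for (const Detection &det : raw) {
        if (det.confidence >= threshold) {
            kept.push_back(det);
        }
    }
    return kept;
}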

#include <QDebug>
#include <QThread>
#include <opencv2/highgui.hpp>
#include "aiengine.h"
#include "aiengineinference.h"

#if defined(OPI5_BUILD)
#include "src-opi5/aiengineinferenceopi5.h"
#elif defined(OPENCV_BUILD)
#include "src-opencv-onnx/aiengineinferenceopencvonnx.h"
#else
#include "src-onnx-runtime/aiengineinferenceonnxruntime.h"
#endif
AiEngine::AiEngine(QString modelPath, QObject *parent)
    : QObject{parent}
{
    mRtspListener = new AiEngineRtspListener(this);
    connect(mRtspListener, &AiEngineRtspListener::frameReceived, this, &AiEngine::frameReceivedSlot);

#if defined(OPI5_BUILD)
    // Three inference instances on OPI5; initialize() takes an index (0-2),
    // so frames can be processed in parallel.
    mInference = new AiEngineInferenceOpi5(modelPath);
    mInference->initialize(0);
    mInference2 = new AiEngineInferenceOpi5(modelPath);
    mInference2->initialize(1);
    mInference3 = new AiEngineInferenceOpi5(modelPath);
    mInference3->initialize(2);
#elif defined(OPENCV_BUILD)
    mInference = new AiEngineInferenceOpencvOnnx(modelPath);
#else
    mInference = new AiEngineInferencevOnnxRuntime(modelPath);
#endif

    // Each inference object runs in its own thread; queued connections marshal
    // frames in and results out across the thread boundary.
    QThread *inferenceThread = new QThread(this);
    mInference->moveToThread(inferenceThread);
    connect(mInference, &AiEngineInference::resultsReady, this, &AiEngine::inferenceResultsReceivedSlot, Qt::QueuedConnection);
    connect(this, &AiEngine::inferenceFrame, mInference, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
    inferenceThread->start();

#ifdef OPI5_BUILD
    QThread *inferenceThread2 = new QThread(this);
    mInference2->moveToThread(inferenceThread2);
    connect(mInference2, &AiEngineInference::resultsReady, this, &AiEngine::inferenceResultsReceivedSlot, Qt::QueuedConnection);
    connect(this, &AiEngine::inferenceFrame2, mInference2, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
    inferenceThread2->start();

    QThread *inferenceThread3 = new QThread(this);
    mInference3->moveToThread(inferenceThread3);
    connect(mInference3, &AiEngineInference::resultsReady, this, &AiEngine::inferenceResultsReceivedSlot, Qt::QueuedConnection);
    connect(this, &AiEngine::inferenceFrame3, mInference3, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
    inferenceThread3->start();
#endif

#ifdef GIMBAL
    mGimbalClient = new AiEngineGimbalClient(this);
#else
    mGimbalClient = nullptr;
#endif
}
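
// Editorial note: reconstructed from usage in this file only (not from
// aiengineinference.h), the wiring above implies roughly this interface:
//
//   class AiEngineInference : public QObject {
//       Q_OBJECT
//   public:
//       bool isActive() const;                    // true while a frame is in flight
//   public slots:
//       void performInferenceSlot(cv::Mat frame); // runs in the worker thread
//   signals:
//       void resultsReady(AiEngineInferenceResult result);
//   };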
void AiEngine::start(void)
{
    mRtspListener->startListening();
    mElapsedTimer.start();
}

void AiEngine::stop(void)
{
    mRtspListener->stopListening();
}
void AiEngine::inferenceResultsReceivedSlot(AiEngineInferenceResult result)
{
    mFrameCounter++;
    // Average FPS since start().
    qDebug() << "FPS = " << (mFrameCounter / (mElapsedTimer.elapsed() / 1000.0f));
    //qDebug() << "AiEngine got inference results in thread: " << QThread::currentThreadId();
    if (mGimbalClient != nullptr) {
        mGimbalClient->inferenceResultSlot(result);
    }
    cv::imshow("Received Frame", result.frame);
    cv::waitKey(1); // give HighGUI a chance to process events so the window updates
}
void AiEngine::frameReceivedSlot(cv::Mat frame)
{
    //qDebug() << "AiEngine got frame from RTSP listener in thread: " << QThread::currentThreadId();
    // Dispatch the frame to the first idle inference instance; if every
    // instance is busy, the frame is dropped.
    if (!mInference->isActive()) {
        emit inferenceFrame(frame);
    }
#ifdef OPI5_BUILD
    else if (!mInference2->isActive()) {
        emit inferenceFrame2(frame);
    }
    else if (!mInference3->isActive()) {
        emit inferenceFrame3(frame);
    }
#endif
}
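
For context, a minimal usage sketch; no main.cpp is shown in this view, so everything
below is hypothetical apart from the AiEngine constructor and start(), which appear
above. The event loop is required because the cross-thread connections are queued.

#include <QCoreApplication>
#include "aiengine.h"

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // Placeholder model path; assumes the header defaults the parent to nullptr.
    AiEngine engine(QStringLiteral("/path/to/model.onnx"));
    engine.start();

    // Qt::QueuedConnection signals are only delivered while an event loop runs.
    return app.exec();
}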