Changes to opi_rtsp test application

- refactoring
  - can use normal YOLOv8 files converted to ONNX format
  - does not work with azaion ONNX files!
This commit is contained in:
Tuomas Järvinen
2024-07-10 19:53:19 +02:00
parent 3d39d8fd99
commit 683f2d538b
6 changed files with 13 additions and 24 deletions
+2 -2
View File
@@ -6,7 +6,7 @@
#ifdef OPI5_BUILD #ifdef OPI5_BUILD
#include "src-opi5/aiengineinferenceopi5.h" #include "src-opi5/aiengineinferenceopi5.h"
#else #else
#include "src-onnx/aiengineinferenceonnx.h" #include "src-opencv-onnx/aiengineinferenceopencvonnx.h"
#endif #endif
AiEngine::AiEngine(QString modelPath, QObject *parent) AiEngine::AiEngine(QString modelPath, QObject *parent)
@@ -18,7 +18,7 @@ AiEngine::AiEngine(QString modelPath, QObject *parent)
#ifdef OPI5_BUILD #ifdef OPI5_BUILD
mInference = new AiEngineInferenceOpi5(modelPath); mInference = new AiEngineInferenceOpi5(modelPath);
#else #else
mInference = new AiEngineInferenceOnnx(modelPath); mInference = new AiEngineInferenceOpencvOnnx(modelPath);
#endif #endif
QThread *inferenceThread = new QThread(this); QThread *inferenceThread = new QThread(this);
+3 -3
View File
@@ -19,12 +19,12 @@ opi5 {
HEADERS += $$PWD/src-opi5/*.h HEADERS += $$PWD/src-opi5/*.h
} else { } else {
message("ONNX build") message("ONNX build")
message("You must use YOLOv10 ONNX files") message("You must use YOLOv8 ONNX files")
QMAKE_CXXFLAGS += -DONNX_BUILD QMAKE_CXXFLAGS += -DONNX_BUILD
INCLUDEPATH += /opt/onnxruntime-linux-x64-1.18.0/include INCLUDEPATH += /opt/onnxruntime-linux-x64-1.18.0/include
LIBS += /opt/onnxruntime-linux-x64-1.18.0/lib/libonnxruntime.so.1.18.0 LIBS += /opt/onnxruntime-linux-x64-1.18.0/lib/libonnxruntime.so.1.18.0
QMAKE_LFLAGS += -Wl,-rpath,/opt/onnxruntime-linux-x64-1.18.0/lib QMAKE_LFLAGS += -Wl,-rpath,/opt/onnxruntime-linux-x64-1.18.0/lib
QMAKE_LFLAGS += -Wl,-rpath,/usr/local/lib QMAKE_LFLAGS += -Wl,-rpath,/usr/local/lib
SOURCES += $$PWD/src-onnx/*.cpp SOURCES += $$PWD/src-opencv-onnx/*.cpp
HEADERS += $$PWD/src-onnx/*.h HEADERS += $$PWD/src-opencv-onnx/*.h
} }
@@ -1,13 +1,13 @@
#include <QDebug> #include <QDebug>
#include <QThread> #include <QThread>
#include "aiengineinferenceonnx.h" #include "aiengineinferenceopencvonnx.h"
const int INFERENCE_SQUARE_WIDTH = 640; const int INFERENCE_SQUARE_WIDTH = 640;
const int INFERENCE_SQUARE_HEIGHT = 640; const int INFERENCE_SQUARE_HEIGHT = 640;
AiEngineInferenceOnnx::AiEngineInferenceOnnx(QString modelPath, QObject *parent) AiEngineInferenceOpencvOnnx::AiEngineInferenceOpencvOnnx(QString modelPath, QObject *parent)
: AiEngineInference{modelPath, parent}, : AiEngineInference{modelPath, parent},
mInference(modelPath.toStdString(), cv::Size(640, 640), "classes.txt") mInference(modelPath.toStdString(), cv::Size(640, 640), "classes.txt")
{ {
@@ -53,7 +53,7 @@ cv::Mat resizeAndPad(const cv::Mat& src)
} }
void AiEngineInferenceOnnx::performInferenceSlot(cv::Mat frame) void AiEngineInferenceOpencvOnnx::performInferenceSlot(cv::Mat frame)
{ {
try { try {
//qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId(); //qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();
@@ -64,6 +64,8 @@ void AiEngineInferenceOnnx::performInferenceSlot(cv::Mat frame)
std::vector<Detection> detections = mInference.runInference(scaledImage); std::vector<Detection> detections = mInference.runInference(scaledImage);
AiEngineInferenceResult result; AiEngineInferenceResult result;
//qDebug() << "performInferenceSlot() found " << detections.size() << " objects";
for (uint i = 0; i < detections.size(); ++i) { for (uint i = 0; i < detections.size(); ++i) {
const Detection &detection = detections[i]; const Detection &detection = detections[i];
@@ -76,19 +78,6 @@ void AiEngineInferenceOnnx::performInferenceSlot(cv::Mat frame)
object.rectangle.bottom = detection.box.y + detection.box.height; object.rectangle.bottom = detection.box.y + detection.box.height;
object.rectangle.right = detection.box.x + detection.box.width; object.rectangle.right = detection.box.x + detection.box.width;
result.objects.append(object); result.objects.append(object);
/*
// Draw box and text
cv::Rect box = detection.box;
cv::Scalar color = detection.color;
cv::rectangle(frame, box, color, 2);
std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);
//std::cout << "classString:" << classString << std::endl;
cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
cv::rectangle(scaledImage, textBox, color, cv::FILLED);
cv::putText(scaledImage, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
*/
} }
if (result.objects.empty() == false) { if (result.objects.empty() == false) {
@@ -2,13 +2,13 @@
#include <QObject> #include <QObject>
#include "aiengineinference.h" #include "aiengineinference.h"
#include "src-onnx/inference.h" #include "src-opencv-onnx/inference.h"
class AiEngineInferenceOnnx : public AiEngineInference class AiEngineInferenceOpencvOnnx : public AiEngineInference
{ {
Q_OBJECT Q_OBJECT
public: public:
explicit AiEngineInferenceOnnx(QString modelPath, QObject *parent = nullptr); explicit AiEngineInferenceOpencvOnnx(QString modelPath, QObject *parent = nullptr);
public slots: public slots:
void performInferenceSlot(cv::Mat frame) override; void performInferenceSlot(cv::Mat frame) override;