Enables use of multiple TPUs in OPI5

This commit is contained in:
Tuomas Järvinen
2024-07-27 11:28:47 +03:00
parent 147213cec6
commit 7052a05d55
17 changed files with 336 additions and 65 deletions
+36 -6
View File
@@ -2,14 +2,32 @@
`rtsp_ai_player` is an application that listens to an RTSP stream, analyzes images with an AI model, and shows the results visually. It also controls a gimbal camera to zoom in on the recognized objects. Application uses YOLOv8 AI models converted to the ONNX format. `rtsp_ai_player` is an application that listens to an RTSP stream, analyzes images with an AI model, and shows the results visually. It also controls a gimbal camera to zoom in on the recognized objects. Application uses YOLOv8 AI models converted to the ONNX format.
### How to convert PT file to ONNX format ### How to convert Azaion AI model file to ONNX format
```bash ```bash
yolo export model=azaion-2024-06-28.pt dynamic=False format=onnx imgsz=640,640 yolo export model=azaion-2024-06-28.pt dynamic=False format=onnx imgsz=640,640
``` ```
## How to use application locally on a Linux PC. ## How to use application locally on a Linux PC.
### Install ONNX runtime ### Speed up compilations
```bash
echo "export MAKEFLAGS=\"-j8\"" >> ~/.bashrc
echo "export PATH=/usr/lib/ccache:\$PATH" >> ~/.bashrc
```
### Install OpenCV 4.10.0
```bash
sudo apt update
sudo apt install libgtk-3-dev libpng-dev cmake ffmpeg libavcodec-dev libavformat-dev libavfilter-dev
wget https://github.com/opencv/opencv/archive/refs/tags/4.10.0.zip
unzip 4.10.0.zip
cd opencv-4.10.0
mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=/opt/opencv-4.10.0 -DBUILD_opencv_world=ON -DOPENCV_GENERATE_PKGCONFIG=ON -DBUILD_PERF_TESTS=OFF -DBUILD_TESTS=OFF ..
make -j8 && sudo make install
```
### Install ONNX runtime 1.18.0
```bash ```bash
wget https://github.com/microsoft/onnxruntime/releases/download/v1.18.0/onnxruntime-linux-x64-1.18.0.tgz wget https://github.com/microsoft/onnxruntime/releases/download/v1.18.0/onnxruntime-linux-x64-1.18.0.tgz
sudo tar xf onnxruntime-linux-x64-1.18.0.tgz -C /opt sudo tar xf onnxruntime-linux-x64-1.18.0.tgz -C /opt
@@ -24,15 +42,20 @@ mkdir mediamtx
tar xf mediamtx_v1.8.4_linux_amd64.tar.gz -C mediamtx tar xf mediamtx_v1.8.4_linux_amd64.tar.gz -C mediamtx
``` ```
### Launch mediamtx RTSP server: ### If you use video file from the local RTSP server:
```bash ```bash
cd mediamtx cd mediamtx
./mediamtx ./mediamtx
``` ```
### Play mp4 video file from RTSP server: ### Play Azaion mp4 video file from RTSP server ... :
```bash ```bash
ffmpeg -re -stream_loop -1 -i $HOME/azaion/models/videos/for_ai_short.mp4 -c copy -f rtsp rtsp://localhost:8554/live.stream ffmpeg -re -stream_loop -1 -i $HOME/azaion/videos/for_ai_cut.mp4 -c copy -f rtsp rtsp://localhost:8554/live.stream
```
### ... or play simple video file from RTSP server:
```bash
ffmpeg -re -stream_loop -1 -i $HOME/azaion/videos/table.mp4 -c copy -f rtsp rtsp://localhost:8554/live.stream
``` ```
### Test RTSP streaming with ffplay: ### Test RTSP streaming with ffplay:
@@ -40,7 +63,14 @@ ffmpeg -re -stream_loop -1 -i $HOME/azaion/models/videos/for_ai_short.mp4 -c cop
ffplay -rtsp_transport tcp rtsp://localhost:8554/live.stream ffplay -rtsp_transport tcp rtsp://localhost:8554/live.stream
``` ```
### Compile and run rtsp_ai_player: ### Compile and run rtsp_ai_player with YOLOv8 medium AI model:
```bash
cd autopilot/misc/rtsp_ai_player
qmake6 && make
./rtsp_ai_player ~/azaion/models/onnx/yolov8m.onnx
```
### Compile and run rtsp_ai_player with Azaion AI model:
```bash ```bash
cd autopilot/misc/rtsp_ai_player cd autopilot/misc/rtsp_ai_player
qmake6 && make qmake6 && make
+33
View File
@@ -19,6 +19,11 @@ AiEngine::AiEngine(QString modelPath, QObject *parent)
#if defined(OPI5_BUILD) #if defined(OPI5_BUILD)
mInference = new AiEngineInferenceOpi5(modelPath); mInference = new AiEngineInferenceOpi5(modelPath);
mInference->initialize(0);
mInference2 = new AiEngineInferenceOpi5(modelPath);
mInference2->initialize(1);
mInference3 = new AiEngineInferenceOpi5(modelPath);
mInference3->initialize(2);
#elif defined(OPENCV_BUILD) #elif defined(OPENCV_BUILD)
mInference = new AiEngineInferenceOpencvOnnx(modelPath); mInference = new AiEngineInferenceOpencvOnnx(modelPath);
#else #else
@@ -31,6 +36,20 @@ AiEngine::AiEngine(QString modelPath, QObject *parent)
connect(this, &AiEngine::inferenceFrame, mInference, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection); connect(this, &AiEngine::inferenceFrame, mInference, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
inferenceThread->start(); inferenceThread->start();
#ifdef OPI5_BUILD
QThread *inferenceThread2 = new QThread(this);
mInference2->moveToThread(inferenceThread2);
connect(mInference2, &AiEngineInference::resultsReady, this, &AiEngine::inferenceResultsReceivedSlot, Qt::QueuedConnection);
connect(this, &AiEngine::inferenceFrame2, mInference2, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
inferenceThread2->start();
QThread *inferenceThread3 = new QThread(this);
mInference3->moveToThread(inferenceThread3);
connect(mInference3, &AiEngineInference::resultsReady, this, &AiEngine::inferenceResultsReceivedSlot, Qt::QueuedConnection);
connect(this, &AiEngine::inferenceFrame3, mInference3, &AiEngineInference::performInferenceSlot, Qt::QueuedConnection);
inferenceThread3->start();
#endif
mGimbalClient = new AiEngineGimbalClient(this); mGimbalClient = new AiEngineGimbalClient(this);
} }
@@ -38,6 +57,7 @@ AiEngine::AiEngine(QString modelPath, QObject *parent)
void AiEngine::start(void) void AiEngine::start(void)
{ {
mRtspListener->startListening(); mRtspListener->startListening();
mElapsedTimer.start();
} }
@@ -49,6 +69,9 @@ void AiEngine::stop(void)
void AiEngine::inferenceResultsReceivedSlot(AiEngineInferenceResult result) void AiEngine::inferenceResultsReceivedSlot(AiEngineInferenceResult result)
{ {
mFrameCounter++;
qDebug() << "FPS = " << (mFrameCounter / (mElapsedTimer.elapsed()/1000.0f));
//qDebug() << "AiEngine got inference results in thread: " << QThread::currentThreadId(); //qDebug() << "AiEngine got inference results in thread: " << QThread::currentThreadId();
mGimbalClient->inferenceResultSlot(result); mGimbalClient->inferenceResultSlot(result);
cv::imshow("Received Frame", result.frame); cv::imshow("Received Frame", result.frame);
@@ -64,4 +87,14 @@ void AiEngine::frameReceivedSlot(cv::Mat frame)
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it."; //qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame(frame.clone()); emit inferenceFrame(frame.clone());
} }
#ifdef OPI5_BUILD
else if (mInference2->isActive() == false) {
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame2(frame.clone());
}
else if (mInference3->isActive() == false) {
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame3(frame.clone());
}
#endif
} }
+7
View File
@@ -1,6 +1,7 @@
#pragma once #pragma once
#include <QObject> #include <QObject>
#include <QElapsedTimer>
#include <opencv2/core.hpp> #include <opencv2/core.hpp>
#include <opencv2/videoio.hpp> #include <opencv2/videoio.hpp>
#include "aienginertsplistener.h" #include "aienginertsplistener.h"
@@ -21,9 +22,15 @@ public slots:
signals: signals:
void inferenceFrame(cv::Mat frame); void inferenceFrame(cv::Mat frame);
void inferenceFrame2(cv::Mat frame);
void inferenceFrame3(cv::Mat frame);
private: private:
QElapsedTimer mElapsedTimer;
uint32_t mFrameCounter = 0;
AiEngineRtspListener *mRtspListener; AiEngineRtspListener *mRtspListener;
AiEngineInference *mInference; AiEngineInference *mInference;
AiEngineInference *mInference2;
AiEngineInference *mInference3;
AiEngineGimbalClient *mGimbalClient; AiEngineGimbalClient *mGimbalClient;
}; };
+12
View File
@@ -0,0 +1,12 @@
#pragma once
#include <QString>
#ifdef OPI5_BUILD
QString rtspVideoUrl = "rtsp://192.168.0.1:8554/live.stream";
#else
// Video file from the local MTX RTSP server or gimbal camera.
QString rtspVideoUrl = "rtsp://localhost:8554/live.stream";
//QString rtspVideoUrl = "rtsp://192.168.0.25:8554/main.264";
#endif
+29 -6
View File
@@ -10,7 +10,7 @@ AiEngineGimbalClient::AiEngineGimbalClient(QObject *parent)
// Create server and run it in the new thread. // Create server and run it in the new thread.
// Connect all signal and slots here. No need to do the same in AiEngineGimbalServer class. // Connect all signal and slots here. No need to do the same in AiEngineGimbalServer class.
mGimbalServer = new AiEngineGimbalServer(this); mGimbalServer = new AiEngineGimbalServer();
QThread *gimbalServerThread = new QThread(this); QThread *gimbalServerThread = new QThread(this);
mGimbalServer->moveToThread(gimbalServerThread); mGimbalServer->moveToThread(gimbalServerThread);
@@ -85,6 +85,33 @@ void AiEngineGimbalClient::inferenceResultSlot(AiEngineInferenceResult result)
// TODO!! Just increasing number for testing purposes ATM. // TODO!! Just increasing number for testing purposes ATM.
static int index = 0; static int index = 0;
// Find best possible target ...
int bestObjectIndex = -1;
float bestObjectProb = -1;
for (int i = 0; i < result.objects.size(); i++) {
const AiEngineObject &object = result.objects[i];
if (object.propability > bestObjectProb) {
bestObjectIndex = i;
bestObjectProb = object.propability;
}
}
// ... if found, then ask camera to zoom to it.
if (bestObjectIndex >= 0) {
const AiEngineObject &object = result.objects[bestObjectIndex];
AiEngineCameraTarget target;
target.rectangle = object.rectangle;
target.index = index++;
qDebug() << "Found best target from index" << bestObjectIndex
<< "Name:" << object.classStr
<< "Probability:" << bestObjectProb;
emit zoomToAiTarget(target);
}
/*
// We got list of all recognized objects, but at least for now we will zoom to all objects at // We got list of all recognized objects, but at least for now we will zoom to all objects at
// once and not for each individually. Got minimal coordinates which contains all the objects. // once and not for each individually. Got minimal coordinates which contains all the objects.
AiEngineRectangle groupRect = getGroupCoordinates(result.objects); AiEngineRectangle groupRect = getGroupCoordinates(result.objects);
@@ -105,9 +132,5 @@ void AiEngineGimbalClient::inferenceResultSlot(AiEngineInferenceResult result)
} }
qDebug() << "inferenceResultSlot() Zooming to square top=" << groupRect.top << "x" << groupRect.left << "and bottom:" << groupRect.bottom << "x" << groupRect.right; qDebug() << "inferenceResultSlot() Zooming to square top=" << groupRect.top << "x" << groupRect.left << "and bottom:" << groupRect.bottom << "x" << groupRect.right;
*/
AiEngineCameraTarget target;
target.rectangle = groupRect;
target.index = index++;
emit zoomToAiTarget(target);
} }
+3 -1
View File
@@ -17,6 +17,7 @@ public:
AiEngineRectangle rectangle; AiEngineRectangle rectangle;
float propability; float propability;
int classId; int classId;
QString classStr;
}; };
@@ -38,10 +39,11 @@ protected:
cv::Mat resizeAndPad(const cv::Mat& src); cv::Mat resizeAndPad(const cv::Mat& src);
QString mModelPath; QString mModelPath;
bool mActive; bool mActive;
int mNumber;
public slots: public slots:
virtual void performInferenceSlot(cv::Mat frame) = 0; virtual void performInferenceSlot(cv::Mat frame) = 0;
virtual void initialize(int number) = 0;
signals: signals:
void resultsReady(AiEngineInferenceResult results); void resultsReady(AiEngineInferenceResult results);
}; };
+1 -1
View File
@@ -1,7 +1,7 @@
#include <QDebug> #include <QDebug>
#include <QtConcurrent/QtConcurrent> #include <QtConcurrent/QtConcurrent>
#include "aienginertsplistener.h" #include "aienginertsplistener.h"
#include "config.h" #include "aiengineconfig.h"
AiEngineRtspListener::AiEngineRtspListener(QObject *parent) AiEngineRtspListener::AiEngineRtspListener(QObject *parent)
-10
View File
@@ -1,10 +0,0 @@
#pragma once
#include <QString>
#ifdef OPI5_BUILD
//QString rtspVideoUrl = "rtsp://192.168.168.11:8554/live.stream";
QString rtspVideoUrl = "rtsp://192.168.0.25:8554/main.264";
#else
QString rtspVideoUrl = "rtsp://localhost:8554/live.stream";
#endif
+13 -5
View File
@@ -1,7 +1,6 @@
QT += core network serialport QT += core network serialport
QT -= gui QT -= gui
CONFIG += c++11 link_pkgconfig concurrent console CONFIG += c++11 concurrent console
PKGCONFIG += opencv4
MOC_DIR = moc MOC_DIR = moc
OBJECTS_DIR = obj OBJECTS_DIR = obj
@@ -21,8 +20,10 @@ opi5 {
message("OpenCV build") message("OpenCV build")
message("You must use YOLOv8 ONNX files. Azaion model does not work!") message("You must use YOLOv8 ONNX files. Azaion model does not work!")
message("OpenCV must be version 4.10.0 installed to /usr/local/") message("OpenCV must be version 4.10.0 installed to /usr/local/")
INCLUDEPATH += /opt/opencv-4.10.0/include/opencv4/
LIBS += /opt/opencv-4.10.0/lib/libopencv_world.so
QMAKE_LFLAGS += -Wl,-rpath,/opt/opencv-4.10.0/lib
QMAKE_CXXFLAGS += -DOPENCV_BUILD QMAKE_CXXFLAGS += -DOPENCV_BUILD
QMAKE_LFLAGS += -Wl,-rpath,/usr/local/lib
SOURCES += $$PWD/src-opencv-onnx/*.cpp SOURCES += $$PWD/src-opencv-onnx/*.cpp
HEADERS += $$PWD/src-opencv-onnx/*.h HEADERS += $$PWD/src-opencv-onnx/*.h
} }
@@ -30,10 +31,17 @@ else {
message("ONNX build") message("ONNX build")
message("You must use YOLOv8 ONNX files. Azaion ONNX model also works fine.") message("You must use YOLOv8 ONNX files. Azaion ONNX model also works fine.")
QMAKE_CXXFLAGS += -DONNX_BUILD QMAKE_CXXFLAGS += -DONNX_BUILD
# ONNX-runtime
INCLUDEPATH += /opt/onnxruntime-linux-x64-1.18.0/include INCLUDEPATH += /opt/onnxruntime-linux-x64-1.18.0/include
LIBS += /opt/onnxruntime-linux-x64-1.18.0/lib/libonnxruntime.so.1.18.0
QMAKE_LFLAGS += -Wl,-rpath,/opt/onnxruntime-linux-x64-1.18.0/lib QMAKE_LFLAGS += -Wl,-rpath,/opt/onnxruntime-linux-x64-1.18.0/lib
QMAKE_LFLAGS += -Wl,-rpath,/usr/local/lib LIBS += /opt/onnxruntime-linux-x64-1.18.0/lib/libonnxruntime.so.1.18.0
# OpenCV 4.10.0
INCLUDEPATH += /opt/opencv-4.10.0/include/opencv4/
QMAKE_LFLAGS += -Wl,-rpath,/opt/opencv-4.10.0/lib
LIBS += /opt/opencv-4.10.0/lib/libopencv_world.so
SOURCES += $$PWD/src-onnx-runtime/*.cpp SOURCES += $$PWD/src-onnx-runtime/*.cpp
HEADERS += $$PWD/src-onnx-runtime/*.h HEADERS += $$PWD/src-onnx-runtime/*.h
} }
@@ -15,6 +15,7 @@ AiEngineInferencevOnnxRuntime::AiEngineInferencevOnnxRuntime(QString modelPath,
{ {
qDebug() << "TUOMAS AiEngineInferencevOnnxRuntime() mModelPath=" << mModelPath; qDebug() << "TUOMAS AiEngineInferencevOnnxRuntime() mModelPath=" << mModelPath;
/*
mClassNames = { mClassNames = {
"Armoured vehicle", "Armoured vehicle",
"Truck", "Truck",
@@ -27,6 +28,90 @@ AiEngineInferencevOnnxRuntime::AiEngineInferencevOnnxRuntime(QString modelPath,
"Additional protection tank", "Additional protection tank",
"Smoke" "Smoke"
}; };
*/
mClassNames = {
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush"
};
} }
@@ -37,7 +122,7 @@ cv::Mat AiEngineInferencevOnnxRuntime::drawLabels(const cv::Mat &image, const st
for (const auto &detection : detections) for (const auto &detection : detections)
{ {
cv::rectangle(result, detection.box, cv::Scalar(0, 255, 0), 2); cv::rectangle(result, detection.box, cv::Scalar(0, 255, 0), 2);
std::string label = mClassNames[detection.classId] + ": " + std::to_string(detection.conf); std::string label = mClassNames[detection.classId].toStdString() + ": " + std::to_string(detection.conf);
int baseLine; int baseLine;
cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine); cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
@@ -67,10 +152,29 @@ cv::Mat AiEngineInferencevOnnxRuntime::drawLabels(const cv::Mat &image, const st
void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame) void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame)
{ {
mActive = true; //qDebug() << __PRETTY_FUNCTION__;
try {
mActive = true;
cv::Mat scaledImage = resizeAndPad(frame); cv::Mat scaledImage = resizeAndPad(frame);
//cv::imwrite("/tmp/frame.png", scaledImage);
std::vector<Yolov8Result> detections = mPredictor.predict(scaledImage); std::vector<Yolov8Result> detections = mPredictor.predict(scaledImage);
// Only keep following detected objects.
// car = 2
// train = 6
// cup = 41
// banana = 46
auto it = std::remove_if(detections.begin(), detections.end(),
[](const Yolov8Result& result) {
return result.classId != 2 &&
result.classId != 6 &&
result.classId != 41 &&
result.classId != 46;
});
detections.erase(it, detections.end());
AiEngineInferenceResult result; AiEngineInferenceResult result;
for (uint i = 0; i < detections.size(); i++) { for (uint i = 0; i < detections.size(); i++) {
@@ -79,6 +183,7 @@ void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame)
// Add detected objects to the results // Add detected objects to the results
AiEngineObject object; AiEngineObject object;
object.classId = detection.classId; object.classId = detection.classId;
object.classStr = mClassNames[detection.classId];
object.propability = detection.conf; object.propability = detection.conf;
object.rectangle.top = detection.box.y; object.rectangle.top = detection.box.y;
object.rectangle.left = detection.box.x; object.rectangle.left = detection.box.x;
@@ -88,10 +193,25 @@ void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame)
} }
if (result.objects.empty() == false) { if (result.objects.empty() == false) {
qDebug() << __PRETTY_FUNCTION__ << "detections:" << detections.size();
result.frame = drawLabels(scaledImage, detections); result.frame = drawLabels(scaledImage, detections);
emit resultsReady(result); emit resultsReady(result);
} }
mActive = false; mActive = false;
}
catch (const cv::Exception& e) {
std::cout << "OpenCV exception caught: " << e.what() << std::endl;
}
catch (const std::exception& e) {
std::cout << "Standard exception caught: " << e.what() << std::endl;
}
catch (...) {
std::cout << "Unknown exception caught" << std::endl;
}
}
void AiEngineInferencevOnnxRuntime::initialize(int number)
{
(void)number;
} }
@@ -9,6 +9,7 @@ class AiEngineInferencevOnnxRuntime : public AiEngineInference
Q_OBJECT Q_OBJECT
public: public:
explicit AiEngineInferencevOnnxRuntime(QString modelPath, QObject *parent = nullptr); explicit AiEngineInferencevOnnxRuntime(QString modelPath, QObject *parent = nullptr);
void initialize(int number);
public slots: public slots:
void performInferenceSlot(cv::Mat frame) override; void performInferenceSlot(cv::Mat frame) override;
@@ -16,5 +17,5 @@ public slots:
private: private:
cv::Mat drawLabels(const cv::Mat &image, const std::vector<Yolov8Result> &detections); cv::Mat drawLabels(const cv::Mat &image, const std::vector<Yolov8Result> &detections);
YOLOPredictor mPredictor; YOLOPredictor mPredictor;
std::vector<std::string> mClassNames; QVector<QString> mClassNames;
}; };
@@ -21,10 +21,11 @@ void AiEngineInferenceOpencvOnnx::performInferenceSlot(cv::Mat frame)
mActive = true; mActive = true;
cv::Mat scaledImage = resizeAndPad(frame); cv::Mat scaledImage = resizeAndPad(frame);
//cv::imwrite("/tmp/frame.png", scaledImage);
std::vector<Detection> detections = mInference.runInference(scaledImage); std::vector<Detection> detections = mInference.runInference(scaledImage);
AiEngineInferenceResult result; AiEngineInferenceResult result;
//qDebug() << "performInferenceSlot() found " << detections.size() << " objects"; qDebug() << "performInferenceSlot() found " << detections.size() << " objects";
for (uint i = 0; i < detections.size(); ++i) { for (uint i = 0; i < detections.size(); ++i) {
const Detection &detection = detections[i]; const Detection &detection = detections[i];
@@ -38,8 +39,15 @@ void AiEngineInferenceOpencvOnnx::performInferenceSlot(cv::Mat frame)
object.rectangle.bottom = detection.box.y + detection.box.height; object.rectangle.bottom = detection.box.y + detection.box.height;
object.rectangle.right = detection.box.x + detection.box.width; object.rectangle.right = detection.box.x + detection.box.width;
result.objects.append(object); result.objects.append(object);
//qDebug() << "performInferenceSlot()" << object.rectangle.top << object.rectangle.left << "and" << object.rectangle.bottom << object.rectangle.right;
} }
auto end = std::remove_if(detections.begin(), detections.end(), [](const Detection& detection) {
return detection.class_id != 0;
});
detections.erase(end, detections.end());
if (result.objects.empty() == false) { if (result.objects.empty() == false) {
result.frame = mInference.drawLabels(scaledImage, detections); result.frame = mInference.drawLabels(scaledImage, detections);
emit resultsReady(result); emit resultsReady(result);
@@ -51,3 +59,9 @@ void AiEngineInferenceOpencvOnnx::performInferenceSlot(cv::Mat frame)
std::cerr << "performInferenceSlot() Error: " << e.what() << std::endl; std::cerr << "performInferenceSlot() Error: " << e.what() << std::endl;
} }
} }
void AiEngineInferenceOpencvOnnx::initialize(int number)
{
(void)number;
}
@@ -9,6 +9,7 @@ class AiEngineInferenceOpencvOnnx : public AiEngineInference
Q_OBJECT Q_OBJECT
public: public:
explicit AiEngineInferenceOpencvOnnx(QString modelPath, QObject *parent = nullptr); explicit AiEngineInferenceOpencvOnnx(QString modelPath, QObject *parent = nullptr);
void initialize(int number);
public slots: public slots:
void performInferenceSlot(cv::Mat frame) override; void performInferenceSlot(cv::Mat frame) override;
@@ -12,13 +12,19 @@ AiEngineInferenceOpi5::AiEngineInferenceOpi5(QString modelPath, QObject *parent)
: AiEngineInference{modelPath, parent} : AiEngineInference{modelPath, parent}
{ {
qDebug() << "AiEngineInferenceOpi5() test mModelPath=" << mModelPath; qDebug() << "AiEngineInferenceOpi5() test mModelPath=" << mModelPath;
}
memset(&mRrknnAppCtx, 0, sizeof(rknn_app_context_t));
void AiEngineInferenceOpi5::initialize(int number)
{
mNumber = number;
memset(&mRrknnAppCtx0, 0, sizeof(rknn_app_context_t));
init_post_process(); init_post_process();
int ret = init_yolov8_model(modelPath.toLocal8Bit(), &mRrknnAppCtx); int ret = init_yolov8_model(mModelPath.toLocal8Bit(), &mRrknnAppCtx0);
if (ret != 0) { if (ret != 0) {
qDebug() << "init_yolov8_model() failure! ret: " << ret << "modelPath = " << modelPath; qDebug() << "init_yolov8_model() failure! ret: " << ret << "modelPath = " << mModelPath << "number:" << number;
return; return;
} }
} }
@@ -27,7 +33,7 @@ AiEngineInferenceOpi5::AiEngineInferenceOpi5(QString modelPath, QObject *parent)
AiEngineInferenceOpi5::~AiEngineInferenceOpi5() AiEngineInferenceOpi5::~AiEngineInferenceOpi5()
{ {
deinit_post_process(); deinit_post_process();
release_yolov8_model(&mRrknnAppCtx); release_yolov8_model(&mRrknnAppCtx0);
} }
@@ -110,7 +116,7 @@ void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
image_buffer_t imgBuffer = convertCV2FrameToImageBuffer(scaledFrame); image_buffer_t imgBuffer = convertCV2FrameToImageBuffer(scaledFrame);
object_detect_result_list od_results; object_detect_result_list od_results;
int ret = inference_yolov8_model(&mRrknnAppCtx, &imgBuffer, &od_results); int ret = inference_yolov8_model(&mRrknnAppCtx0, &imgBuffer, &od_results, mNumber);
if (ret != 0) { if (ret != 0) {
qDebug() << "inference_yolov8_model() failure! ret: " << ret; qDebug() << "inference_yolov8_model() failure! ret: " << ret;
return; return;
@@ -11,6 +11,7 @@ class AiEngineInferenceOpi5 : public AiEngineInference
public: public:
explicit AiEngineInferenceOpi5(QString modelPath, QObject *parent = nullptr); explicit AiEngineInferenceOpi5(QString modelPath, QObject *parent = nullptr);
~AiEngineInferenceOpi5(); ~AiEngineInferenceOpi5();
void initialize(int number) override;
public slots: public slots:
void performInferenceSlot(cv::Mat frame) override; void performInferenceSlot(cv::Mat frame) override;
@@ -21,5 +22,5 @@ private:
cv::Mat resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame); cv::Mat resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame);
void drawObjects(cv::Mat& image, const object_detect_result_list& result_list); void drawObjects(cv::Mat& image, const object_detect_result_list& result_list);
rknn_app_context_t mRrknnAppCtx; rknn_app_context_t mRrknnAppCtx0;
}; };
+24 -1
View File
@@ -155,7 +155,7 @@ int release_yolov8_model(rknn_app_context_t *app_ctx)
return 0; return 0;
} }
int inference_yolov8_model(rknn_app_context_t *app_ctx, image_buffer_t *img, object_detect_result_list *od_results) int inference_yolov8_model(rknn_app_context_t *app_ctx, image_buffer_t *img, object_detect_result_list *od_results, int core)
{ {
int ret; int ret;
image_buffer_t dst_img; image_buffer_t dst_img;
@@ -211,6 +211,29 @@ int inference_yolov8_model(rknn_app_context_t *app_ctx, image_buffer_t *img, obj
return -1; return -1;
} }
if (core == 1) {
ret = rknn_set_core_mask(app_ctx->rknn_ctx, RKNN_NPU_CORE_0);
//ret = rknn_set_core_mask(app_ctx->rknn_ctx, RKNN_NPU_CORE_0_1_2);
if (ret < 0) {
printf("rknn_set_core_mask(RKNN_NPU_CORE_0) fail! ret=%d\n", ret);
return -1;
}
}
else if (core == 2) {
ret = rknn_set_core_mask(app_ctx->rknn_ctx, RKNN_NPU_CORE_1);
if (ret < 0) {
printf("rknn_set_core_mask(RKNN_NPU_CORE_1) fail! ret=%d\n", ret);
return -1;
}
}
else if (core == 3) {
ret = rknn_set_core_mask(app_ctx->rknn_ctx, RKNN_NPU_CORE_2);
if (ret < 0) {
printf("rknn_set_core_mask(RKNN_NPU_CORE_2) fail! ret=%d\n", ret);
return -1;
}
}
// Run // Run
printf("rknn_run\n"); printf("rknn_run\n");
ret = rknn_run(app_ctx->rknn_ctx, nullptr); ret = rknn_run(app_ctx->rknn_ctx, nullptr);
+1 -1
View File
@@ -37,6 +37,6 @@ int init_yolov8_model(const char* model_path, rknn_app_context_t* app_ctx);
int release_yolov8_model(rknn_app_context_t* app_ctx); int release_yolov8_model(rknn_app_context_t* app_ctx);
int inference_yolov8_model(rknn_app_context_t* app_ctx, image_buffer_t* img, object_detect_result_list* od_results); int inference_yolov8_model(rknn_app_context_t* app_ctx, image_buffer_t* img, object_detect_result_list* od_results, int core);
#endif //_RKNN_DEMO_YOLOV8_H_ #endif //_RKNN_DEMO_YOLOV8_H_