Add compile option to use YOLOv8 ONNX models for testing.

- added qmake option yolo_onnx to use normal YOLOv8 ONNX models. This makes it possible to test
  the gimbal camera indoors without the real model.
- reduced confidence threshold requirement in AiEngineInferencevOnnxRuntime from 0.4 to 0.2
- made detection label rendering prettier with ONNX Runtime
- removed unnecessary cv::Mat::clone()

Type: Improvement
Issue: https://denyspopov.atlassian.net/browse/AZ-39
This commit is contained in:
Tuomas Järvinen
2024-08-18 16:59:04 +03:00
parent 86c0a7d5c4
commit 022e4a1200
4 changed files with 43 additions and 34 deletions
+2 -2
View File
@@ -4,7 +4,7 @@
### How to convert Azaion AI model file to ONNX format
```bash
yolo export model=azaion-2024-06-28.pt dynamic=False format=onnx imgsz=640,640
yolo export model=azaion-2024-08-13.pt dynamic=False format=onnx imgsz=640,640
```
## How to use application locally on a Linux PC.
@@ -74,7 +74,7 @@ qmake6 && make
```bash
cd autopilot/misc/rtsp_ai_player
qmake6 && make
./rtsp_ai_player ~/azaion/models/azaion/azaion-2024-06-28.onnx
./rtsp_ai_player ~/azaion/models/azaion/azaion-2024-08-13.onnx
```
### Compile and run rtsp_ai_player with YOLOv8 medium model and gimbal camera:
```bash
+3 -3
View File
@@ -92,16 +92,16 @@ void AiEngine::frameReceivedSlot(cv::Mat frame)
if (mInference->isActive() == false) {
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame(frame.clone());
emit inferenceFrame(frame);
}
#ifdef OPI5_BUILD
else if (mInference2->isActive() == false) {
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame2(frame.clone());
emit inferenceFrame2(frame);
}
else if (mInference3->isActive() == false) {
//qDebug() << "AiEngine. Inference thread is free. Sending frame to it.";
emit inferenceFrame3(frame.clone());
emit inferenceFrame3(frame);
}
#endif
}
+7 -1
View File
@@ -15,15 +15,21 @@ else {
message("Not using real gimbal camera.")
}
yolo_onnx {
QMAKE_CXXFLAGS += -DYOLO_ONNX
}
opi5 {
message("OPI5 build")
CONFIG += link_pkgconfig
PKGCONFIG += opencv4 librga stb libturbojpeg
INCLUDEPATH += /usr/include/rga
QMAKE_CXXFLAGS += -DOPI5_BUILD
LIBS += /usr/local/lib/librknnrt.so
SOURCES += $$PWD/src-opi5/*.c $$PWD/src-opi5/*.cpp $$PWD/src-opi5/*.cc
HEADERS += $$PWD/src-opi5/*.h
} else:opencv {
}
else:opencv {
message("OpenCV build")
message("You must use YOLOv8 ONNX files. Azaion model does not work!")
message("OpenCV must be version 4.10.0 installed to /usr/local/")
@@ -4,7 +4,7 @@
#include "aiengineinferenceonnxruntime.h"
static const float confThreshold = 0.4f;
static const float confThreshold = 0.2f;
static const float iouThreshold = 0.4f;
static const float maskThreshold = 0.5f;
@@ -15,21 +15,7 @@ AiEngineInferencevOnnxRuntime::AiEngineInferencevOnnxRuntime(QString modelPath,
{
qDebug() << "TUOMAS AiEngineInferencevOnnxRuntime() mModelPath=" << mModelPath;
/*
mClassNames = {
"Armoured vehicle",
"Truck",
"Vehicle",
"Artillery",
"Shadow artillery",
"Trenches",
"Military man",
"Tyre tracks",
"Additional protection tank",
"Smoke"
};
*/
#ifdef YOLO_ONNX
mClassNames = {
"person",
"bicycle",
@@ -112,6 +98,20 @@ AiEngineInferencevOnnxRuntime::AiEngineInferencevOnnxRuntime(QString modelPath,
"hair drier",
"toothbrush"
};
#else
mClassNames = {
"Armoured vehicle",
"Truck",
"Vehicle",
"Artillery",
"Shadow artillery",
"Trenches",
"Military man",
"Tyre tracks",
"Additional protection tank",
"Smoke"
};
#endif
}
@@ -122,28 +122,31 @@ cv::Mat AiEngineInferencevOnnxRuntime::drawLabels(const cv::Mat &image, const st
for (const auto &detection : detections)
{
cv::rectangle(result, detection.box, cv::Scalar(0, 255, 0), 2);
std::string label = mClassNames[detection.classId].toStdString() + ": " + std::to_string(detection.conf);
int confidence = roundf(detection.conf * 100);
std::string label = mClassNames[detection.classId].toStdString() + ": " + std::to_string(confidence) + "%";
int baseLine;
cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_COMPLEX, 0.5, 1, &baseLine);
cv::Point labelOrigin(detection.box.x, detection.box.y - labelSize.height - baseLine);
cv::rectangle(
result,
cv::Point(detection.box.x, detection.box.y - labelSize.height),
cv::Point(detection.box.x + labelSize.width, detection.box.y + baseLine),
labelOrigin,
cv::Point(detection.box.x + labelSize.width, detection.box.y),
cv::Scalar(255, 255, 255),
cv::FILLED);
cv::putText(
result,
label,
cv::Point(
detection.box.x,
detection.box.y),
cv::FONT_HERSHEY_SIMPLEX,
cv::Point(detection.box.x, detection.box.y - baseLine + 2),
cv::FONT_HERSHEY_COMPLEX,
0.5,
cv::Scalar(0, 0, 0),
1);
1,
cv::LINE_AA);
}
return result;
@@ -152,15 +155,14 @@ cv::Mat AiEngineInferencevOnnxRuntime::drawLabels(const cv::Mat &image, const st
void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame)
{
//qDebug() << __PRETTY_FUNCTION__;
qDebug() << __PRETTY_FUNCTION__;
try {
mActive = true;
cv::Mat scaledImage = resizeAndPad(frame);
//cv::imwrite("/tmp/frame.png", scaledImage);
std::vector<Yolov8Result> detections = mPredictor.predict(scaledImage);
#ifdef YOLO_ONNX
// Only keep following detected objects.
// car = 2
// train = 6
@@ -174,6 +176,7 @@ void AiEngineInferencevOnnxRuntime::performInferenceSlot(cv::Mat frame)
result.classId != 46;
});
detections.erase(it, detections.end());
#endif
AiEngineInferenceResult result;