Fixed PC build to work with ONNX YOLOv10 files

This commit is contained in:
Tuomas Järvinen
2024-07-09 20:34:21 +02:00
parent c4af0cc461
commit 607ac22b46
3 changed files with 24 additions and 11 deletions
+3 -4
View File
@@ -47,12 +47,11 @@ void AiEngine::inferenceResultsReceivedSlot(AiEngineInferenceResult result)
{
//qDebug() << "AiEngine got inference results in thread: " << QThread::currentThreadId();
#ifdef OPI5_BUILD
mGimbalControl->inferenceResultSlot(result);
cv::imshow("Received Frame", result.frame);
#endif
//#ifndef OPI5_BUILD
//cv::imshow("Received Frame", result.frame);
//#endif
cv::imshow("Received Frame", result.frame);
}
+4 -3
View File
@@ -5,16 +5,17 @@ PKGCONFIG += opencv4
MOC_DIR = moc
OBJECTS_DIR = obj
SOURCES = $$PWD/*.cpp
SOURCES = $$PWD/*.cpp $$PWD/../../misc/camera/a8_remote/remoteControl.cpp
HEADERS = $$PWD/*.h
INCLUDEPATH += $$PWD/../../misc/camera/a8_remote
opi5 {
message("OPI5 build")
PKGCONFIG += opencv4 librga stb libturbojpeg
INCLUDEPATH += /usr/include/rga $$PWD/../../misc/camera/a8_remote
INCLUDEPATH += /usr/include/rga
QMAKE_CXXFLAGS += -DOPI5_BUILD
LIBS += /usr/local/lib/librknnrt.so
SOURCES += $$PWD/src-opi5/*.c $$PWD/src-opi5/*.cpp $$PWD/src-opi5/*.cc $$PWD/../../misc/camera/a8_remote/remoteControl.cpp
SOURCES += $$PWD/src-opi5/*.c $$PWD/src-opi5/*.cpp $$PWD/src-opi5/*.cc
HEADERS += $$PWD/src-opi5/*.h
} else {
message("ONNX build")
@@ -2,17 +2,18 @@
#include <QThread>
#include "aiengineinferenceonnx.h"
AiEngineInferenceOnnx::AiEngineInferenceOnnx(QString modelPath, QObject *parent)
: AiEngineInference{modelPath, parent}
{
qDebug() << "TUOMAS test mModelPath=" << mModelPath;
//qDebug() << "TUOMAS test mModelPath=" << mModelPath;
mEngine = new InferenceEngine(modelPath.toStdString());
}
void AiEngineInferenceOnnx::performInferenceSlot(cv::Mat frame)
{
qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();
//qDebug() << "performInferenceSlot() in thread: " << QThread::currentThreadId();
mActive = true;
@@ -20,12 +21,24 @@ void AiEngineInferenceOnnx::performInferenceSlot(cv::Mat frame)
int orig_height = frame.rows;
std::vector<float> input_tensor_values = mEngine->preprocessImage(frame);
std::vector<float> results = mEngine->runInference(input_tensor_values);
float confidence_threshold = 0.5;
float confidence_threshold = 0.4;
std::vector<Detection> detections = mEngine->filterDetections(results, confidence_threshold, mEngine->input_shape[2], mEngine->input_shape[3], orig_width, orig_height);
AiEngineInferenceResult result;
for (uint32_t i = 0; i < detections.size(); i++) {
const Detection &detection = detections[i];
AiEngineObject object;
object.classId = detection.class_id;
object.propability = detection.confidence;
object.rectangle.top = detection.bbox.y;
object.rectangle.left = detection.bbox.x;
object.rectangle.bottom = detection.bbox.y + detection.bbox.height;
object.rectangle.right = detection.bbox.x + detection.bbox.width;
result.objects.append(object);
}
result.frame = mEngine->draw_labels(frame.clone(), detections);
result.objects = 1;
emit resultsReady(result);
mActive = false;