Minor fixes to NCNN inference

- reduced logging
- also emit empty inference results to AiEngine
This commit is contained in:
Tuomas Järvinen
2024-10-24 18:57:49 +02:00
parent e3643ea622
commit de63892725
@@ -1,6 +1,5 @@
#include <QDebug>
#include <QThread>
#include <iostream>
#include <vector>
#include "aiengineinferencencnn.h"
@@ -23,7 +22,7 @@ char* getCharPointerCopy(const QString& modelPath) {
AiEngineInferencevNcnn::AiEngineInferencevNcnn(QString modelPath, QObject *parent) :
AiEngineInference{modelPath, parent}
{
qDebug() << "TUOMAS AiEngineInferencevNcnn() mModelPath=" << mModelPath;
qDebug() << "AiEngineInferencevNcnn() mModelPath=" << mModelPath;
yolov8.opt.num_threads = 4;
yolov8.opt.use_vulkan_compute = false;
@@ -32,9 +31,6 @@ AiEngineInferencevNcnn::AiEngineInferencevNcnn(QString modelPath, QObject *paren
char *model = getCharPointerCopy(modelPath);
char *param = getCharPointerCopy(paramPath);
qDebug() << "model:" << model;
qDebug() << "param:" << param;
yolov8.load_param(param);
yolov8.load_model(model);
}
@@ -229,8 +225,6 @@ int AiEngineInferencevNcnn::detect_yolov8(const cv::Mat& bgr, std::vector<Object
const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
in_pad.substract_mean_normalize(0, norm_vals);
auto start = std::chrono::high_resolution_clock::now();
ncnn::Extractor ex = yolov8.create_extractor();
ex.input("in0", in_pad);
@@ -283,9 +277,6 @@ int AiEngineInferencevNcnn::detect_yolov8(const cv::Mat& bgr, std::vector<Object
objects[i].rect.height = y1 - y0;
}
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
std::cout << "Time taken: " << duration.count() << " milliseconds" << std::endl;
return 0;
}
@@ -340,8 +331,7 @@ static cv::Mat draw_objects(const cv::Mat& bgr, const std::vector<Object>& objec
cv::Scalar cc(color[0], color[1], color[2]);
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
//fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob, obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(image, obj.rect, cc, 2);
@@ -378,26 +368,23 @@ void AiEngineInferencevNcnn::performInferenceSlot(cv::Mat frame)
std::vector<Object> objects;
detect_yolov8(scaledImage, objects);
if (objects.empty() == false) {
AiEngineInferenceResult result;
result.frame = draw_objects(scaledImage, objects);
AiEngineInferenceResult result;
result.frame = draw_objects(scaledImage, objects);
for (uint i = 0; i < objects.size(); i++) {
const Object &detection = objects[i];
AiEngineObject object;
object.classId = detection.label;
object.classStr = mClassNames[detection.label];
object.propability = detection.prob;
object.rectangle.top = detection.rect.y;
object.rectangle.left = detection.rect.x;
object.rectangle.bottom = detection.rect.y + detection.rect.height;
object.rectangle.right = detection.rect.x + detection.rect.width;
result.objects.append(object);
}
emit resultsReady(result);
for (uint i = 0; i < objects.size(); i++) {
const Object &detection = objects[i];
AiEngineObject object;
object.classId = detection.label;
object.classStr = mClassNames[detection.label];
object.propability = detection.prob;
object.rectangle.top = detection.rect.y;
object.rectangle.left = detection.rect.x;
object.rectangle.bottom = detection.rect.y + detection.rect.height;
object.rectangle.right = detection.rect.x + detection.rect.width;
result.objects.append(object);
}
emit resultsReady(result);
mActive = false;
}