mirror of
https://github.com/azaion/autopilot.git
synced 2026-04-23 00:26:33 +00:00
Changes to the opi_rtsp test application
- refactoring: works with standard YOLOv8 models converted to ONNX format; does not work with Azaion ONNX files!
This commit is contained in:
@@ -0,0 +1,93 @@
|
||||
#include "aiengineinferenceopencvonnx.h"

#include <algorithm>
#include <iostream>

#include <QDebug>
#include <QThread>
|
||||
|
||||
|
||||
// Fixed input size of the YOLOv8 ONNX network: frames are letterboxed into
// this square before inference (see resizeAndPad()).
constexpr int INFERENCE_SQUARE_WIDTH = 640;
constexpr int INFERENCE_SQUARE_HEIGHT = 640;
AiEngineInferenceOpencvOnnx::AiEngineInferenceOpencvOnnx(QString modelPath, QObject *parent)
|
||||
: AiEngineInference{modelPath, parent},
|
||||
mInference(modelPath.toStdString(), cv::Size(640, 640), "classes.txt")
|
||||
{
|
||||
//qDebug() << "TUOMAS test mModelPath=" << mModelPath;
|
||||
//mEngine = new InferenceEngine(modelPath.toStdString());
|
||||
//mInference = new Inference(modelPath.toStdString(), cv::Size(INFERENCE_SQUARE_WIDTH, INFERENCE_SQUARE_HEIGHT), "classes.txt");
|
||||
}
|
||||
|
||||
|
||||
/// Letterboxes @p src into an INFERENCE_SQUARE_WIDTH x INFERENCE_SQUARE_HEIGHT
/// canvas: the image is shrunk (never enlarged) to fit while preserving its
/// aspect ratio, copied into the top-left corner, and the rest of the canvas
/// is filled with black.
/// @param src input image (any size/type accepted by cv::resize).
/// @return a square canvas of the network input size containing @p src.
cv::Mat resizeAndPad(const cv::Mat& src)
{
    // An empty input would make cv::resize throw; return a blank canvas
    // instead. CV_8UC3 assumed as the frame format — TODO confirm with caller.
    if (src.empty()) {
        return cv::Mat(INFERENCE_SQUARE_HEIGHT, INFERENCE_SQUARE_WIDTH,
                       CV_8UC3, cv::Scalar(0, 0, 0));
    }

    const float aspectRatio = static_cast<float>(src.cols) / src.rows;

    // Shrink-to-fit only: images already within the square keep their size.
    int newWidth = src.cols;
    int newHeight = src.rows;
    if (src.cols > INFERENCE_SQUARE_WIDTH || src.rows > INFERENCE_SQUARE_HEIGHT) {
        if (aspectRatio > 1) {
            // Wider than tall: clamp width, derive height from the ratio.
            newWidth = INFERENCE_SQUARE_WIDTH;
            newHeight = static_cast<int>(INFERENCE_SQUARE_WIDTH / aspectRatio);
        }
        else {
            // Taller than (or equal to) wide: clamp height, derive width.
            newHeight = INFERENCE_SQUARE_HEIGHT;
            newWidth = static_cast<int>(INFERENCE_SQUARE_HEIGHT * aspectRatio);
        }
        // Extreme aspect ratios can truncate a dimension to 0, which would
        // make cv::resize throw; keep at least one pixel in each dimension.
        newWidth = std::max(newWidth, 1);
        newHeight = std::max(newHeight, 1);
    }

    // Resize only when the size actually changes (the old code resized
    // unconditionally, contradicting its own "if needed" comment).
    cv::Mat resized;
    if (newWidth == src.cols && newHeight == src.rows) {
        resized = src;
    }
    else {
        cv::resize(src, resized, cv::Size(newWidth, newHeight));
    }

    // Black canvas of the network input size; the (possibly resized) image
    // is copied into the top-left corner, leaving the padding black.
    cv::Mat output(INFERENCE_SQUARE_HEIGHT, INFERENCE_SQUARE_WIDTH,
                   src.type(), cv::Scalar(0, 0, 0));
    resized.copyTo(output(cv::Rect(0, 0, resized.cols, resized.rows)));

    return output;
}
|
||||
|
||||
|
||||
/// Runs object detection on @p frame and emits resultsReady() when at least
/// one object is detected. Connected as a slot, so it runs on the thread
/// this object lives in.
/// @param frame input image; it is letterboxed to the network input size
///              first, so the detection boxes and the emitted frame are in
///              scaled-image coordinates.
void AiEngineInferenceOpencvOnnx::performInferenceSlot(cv::Mat frame)
{
    mActive = true;
    try {
        cv::Mat scaledImage = resizeAndPad(frame);
        std::vector<Detection> detections = mInference.runInference(scaledImage);

        // Convert the backend detections into the engine-neutral result type.
        AiEngineInferenceResult result;
        for (const Detection &detection : detections) {
            AiEngineObject object;
            object.classId = detection.class_id;
            // NOTE(review): "propability" is the field's spelling in
            // AiEngineObject; it holds the detection confidence.
            object.propability = detection.confidence;
            object.rectangle.top = detection.box.y;
            object.rectangle.left = detection.box.x;
            object.rectangle.bottom = detection.box.y + detection.box.height;
            object.rectangle.right = detection.box.x + detection.box.width;
            result.objects.append(object);
        }

        // Only emit when something was found; the annotated frame is
        // produced lazily for the same reason.
        if (!result.objects.empty()) {
            result.frame = mInference.drawLabels(scaledImage, detections);
            emit resultsReady(result);
        }
    }
    catch (const cv::Exception& e) {
        std::cerr << "performInferenceSlot() Error: " << e.what() << std::endl;
    }
    // Always clear the busy flag. The previous code reset it only inside the
    // try block, so a throwing runInference() left the engine permanently
    // marked active.
    mActive = false;
}
|
||||
Reference in New Issue
Block a user