- added NCNN model support to rtsp_ai_player

- added printing of inference FPS
- simple AI test bench which can be used to compare models
This commit is contained in:
Tuomas Järvinen
2024-10-02 19:15:49 +02:00
parent ef137fbc4b
commit d4779b1bb0
12 changed files with 555 additions and 763 deletions
@@ -70,6 +70,7 @@ void AiEngineInferenceOpi5::freeImageBuffer(image_buffer_t& imgBuffer)
cv::Mat AiEngineInferenceOpi5::resizeToHalfAndAssigntoTopLeft640x640(const cv::Mat& inputFrame)
{
    // Letterbox the input into a fixed 640x640 canvas: scale to fit while
    // preserving aspect ratio, then paste the scaled frame into the top-left
    // corner. The remaining area stays zero-filled (black) padding, which is
    // what the downstream 640x640 detector input expects.
    //
    // @param inputFrame  source image of any size; type is preserved
    // @return            640x640 cv::Mat of the same type as inputFrame
    const int targetWidth = 640;
    const int targetHeight = 640;

    // Guard: cv::resize throws on an empty source. Return an all-black
    // canvas instead so the inference path degrades gracefully.
    if (inputFrame.empty()) {
        return cv::Mat::zeros(targetHeight, targetWidth, inputFrame.type());
    }

    const float aspectRatio = static_cast<float>(inputFrame.cols) / static_cast<float>(inputFrame.rows);

    // Fit to width first; if the resulting height overflows the canvas,
    // fit to height instead (tall/portrait inputs).
    int newWidth = targetWidth;
    int newHeight = static_cast<int>(targetWidth / aspectRatio);
    if (newHeight > targetHeight) {
        newHeight = targetHeight;
        newWidth = static_cast<int>(targetHeight * aspectRatio);
    }
    // Extreme aspect ratios can truncate a dimension to 0, which would make
    // both the resize and the ROI invalid; clamp to at least one pixel.
    if (newWidth < 1) {
        newWidth = 1;
    }
    if (newHeight < 1) {
        newHeight = 1;
    }

    cv::Mat resizedFrame;
    cv::resize(inputFrame, resizedFrame, cv::Size(newWidth, newHeight));

    // Black canvas; copy the scaled frame into its top-left corner.
    cv::Mat outputFrame = cv::Mat::zeros(targetHeight, targetWidth, inputFrame.type());
    const cv::Rect roi(cv::Point(0, 0), resizedFrame.size());
    resizedFrame.copyTo(outputFrame(roi));
    return outputFrame;
}
@@ -91,7 +111,9 @@ void AiEngineInferenceOpi5::drawObjects(cv::Mat& image, const object_detect_resu
const object_detect_result& result = result_list.results[i];
if (result.cls_id >= mClassNames.size()) {
continue;
//result.cls_id = result.cls_id % mClassNames.size();
qDebug() << "Class id >= mClassNames.size() Reducing it.";
//continue;
}
fprintf(stderr, "TUOMAS [%d] prop = %f\n", i, result.prop);
@@ -106,7 +128,7 @@ void AiEngineInferenceOpi5::drawObjects(cv::Mat& image, const object_detect_resu
// Text
char c_text[256];
//sprintf(c_text, "%s %d%%", coco_cls_to_name(result.cls_id), (int)(round(result.prop * 100)));
sprintf(c_text, "%s %d%%", mClassNames[result.cls_id].toStdString().c_str(), (int)(round(result.prop * 100)));
sprintf(c_text, "%s %d%%", mClassNames[result.cls_id % mClassNames.size()].toStdString().c_str(), (int)(round(result.prop * 100)));
cv::Point textOrg(left, top - 5);
cv::putText(image, std::string(c_text), textOrg, cv::FONT_HERSHEY_COMPLEX, result.prop, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);
}
@@ -131,23 +153,25 @@ void AiEngineInferenceOpi5::performInferenceSlot(cv::Mat frame)
return;
}
AiEngineInferenceResult result;
for (int i = 0; i < od_results.count; i++) {
object_detect_result *det_result = &(od_results.results[i]);
if (od_results.count > 0) {
AiEngineInferenceResult result;
for (int i = 0; i < od_results.count; i++) {
object_detect_result *det_result = &(od_results.results[i]);
qDebug() << "TUOMAS box:" << det_result->box.top << det_result->box.left << det_result->box.bottom << det_result->box.right;
AiEngineObject object;
object.classId = det_result->cls_id;
object.propability = det_result->prop;
object.rectangle.top = det_result->box.top;
object.rectangle.left = det_result->box.left;
object.rectangle.bottom = det_result->box.bottom;
object.rectangle.right = det_result->box.right;
result.objects.append(object);
}
AiEngineObject object;
object.classId = det_result->cls_id;
object.propability = det_result->prop;
object.rectangle.top = det_result->box.top;
object.rectangle.left = det_result->box.left;
object.rectangle.bottom = det_result->box.bottom;
object.rectangle.right = det_result->box.right;
result.objects.append(object);
drawObjects(scaledFrame, od_results);
result.frame = scaledFrame.clone();
emit resultsReady(result);
}
drawObjects(scaledFrame, od_results);
result.frame = scaledFrame.clone();
emit resultsReady(result);
mActive = false;
}