Add RKNN conversion: install-and-use scripts; automatically convert to RKNN after AI training completes, and place the .pt and .rknn models in the /azaion/models directory

This commit is contained in:
zxsanny
2025-02-21 14:26:54 +02:00
parent 6e4b0f40ef
commit ceb50bf48a
11 changed files with 205 additions and 46 deletions
+86
View File
@@ -0,0 +1,86 @@
# YOLO dataset configuration: class names, class count, and dataset split paths.
# NOTE(review): only the first 10 classes carry meaningful labels; Class-11
# through Class-80 look like padding to reach nc: 80 — presumably to match a
# model exported with a fixed 80-class head. Confirm before pruning.
names:
- Armored-Vehicle
- Truck
- Vehicle
- Artillery
- Shadow
- Trenches
- Military-men
- Tyre-tracks
- Additional-armored-tank
- Smoke
- Class-11
- Class-12
- Class-13
- Class-14
- Class-15
- Class-16
- Class-17
- Class-18
- Class-19
- Class-20
- Class-21
- Class-22
- Class-23
- Class-24
- Class-25
- Class-26
- Class-27
- Class-28
- Class-29
- Class-30
- Class-31
- Class-32
- Class-33
- Class-34
- Class-35
- Class-36
- Class-37
- Class-38
- Class-39
- Class-40
- Class-41
- Class-42
- Class-43
- Class-44
- Class-45
- Class-46
- Class-47
- Class-48
- Class-49
- Class-50
- Class-51
- Class-52
- Class-53
- Class-54
- Class-55
- Class-56
- Class-57
- Class-58
- Class-59
- Class-60
- Class-61
- Class-62
- Class-63
- Class-64
- Class-65
- Class-66
- Class-67
- Class-68
- Class-69
- Class-70
- Class-71
- Class-72
- Class-73
- Class-74
- Class-75
- Class-76
- Class-77
- Class-78
- Class-79
- Class-80
# Total number of classes (must match len(names)).
nc: 80
# Dataset split directories, relative to the dataset root.
test: test/images
train: train/images
val: valid/images
-5
View File
@@ -1,5 +0,0 @@
# One-off export script (removed in this commit): converts the trained
# PyTorch checkpoint to RKNN format via Ultralytics' export pipeline.
# NOTE(review): `onnx` is imported but never referenced directly — presumably
# kept so the import error surfaces early, since export goes through ONNX.
import onnx
from ultralytics import YOLO
# Load the trained detection checkpoint by filename (must be in the CWD).
model = YOLO('azaion-2024-08-13.pt')
# Emit the RKNN artifact next to the source weights.
model.export(format='rknn')
+6
View File
@@ -0,0 +1,6 @@
from abc import ABC, abstractmethod


class Predictor(ABC):
    """Abstract interface for frame-level inference backends.

    Concrete subclasses (e.g. a YOLO-backed implementation) must provide
    ``predict``; instantiating this base class directly raises ``TypeError``.
    """

    @abstractmethod
    def predict(self, frame):
        """Run inference on a single video frame and return the result."""
        ...
+5 -5
View File
@@ -5,14 +5,16 @@ from ultralytics import YOLO
import cv2
from time import sleep
model = YOLO('azaion-2024-08-13.pt')
from yolo_predictor import YOLOPredictor
# video_url = 'https://www.youtube.com/watch?v=d1n2fDOSo8c'
# stream = CamGear(source=video_url, stream_mode=True, logging=True).start()
predictor = YOLOPredictor()
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
input_name = sys.argv[1]
input_name = 'ForAI.mp4'
output_name = Path(input_name).stem + '_recognised.mp4'
v_input = cv2.VideoCapture(input_name)
@@ -23,9 +25,7 @@ while v_input.isOpened():
if frame is None:
break
results = model.track(frame, persist=True, tracker='bytetrack.yaml')
frame_detected = results[0].plot()
frame_detected = predictor.predict(frame)
frame_detected = cv2.resize(frame_detected, (640, 480))
cv2.imshow('Video', frame_detected)
sleep(0.01)
+20
View File
@@ -0,0 +1,20 @@
import cv2
import numpy as np
import yaml

from predictor import Predictor
from ultralytics import YOLO


class YOLOPredictor(Predictor):
    """Predictor backed by an Ultralytics YOLO model exported to ONNX."""

    def __init__(self):
        # Exported (non-.pt) weights need the task set explicitly —
        # NOTE(review): presumably because the ONNX artifact does not embed
        # it; confirm against the Ultralytics export docs.
        self.model = YOLO('/azaion/models/azaion.onnx')
        self.model.task = 'detect'
        # Class names come from the training data config. The original code
        # read these (and self.model.names) into dead locals; keep them on
        # the instance so callers can map class indices to labels.
        with open('data.yaml', 'r') as f:
            data_yaml = yaml.safe_load(f)
        self.class_names = data_yaml['names']

    def predict(self, frame):
        """Run tracking-aware detection on *frame*.

        Returns an annotated copy of the frame (boxes/ids drawn by
        ``results[0].plot()``); tracking state persists across calls.
        """
        results = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return results[0].plot()