Add RKNN conversion: install and usage scripts; automatically convert to RKNN after AI training finishes and place the .pt and .rknn models into the /azaion/models directory

This commit is contained in:
zxsanny
2025-02-21 14:26:54 +02:00
parent 6e4b0f40ef
commit ceb50bf48a
11 changed files with 205 additions and 46 deletions
+1 -1
View File
@@ -43,5 +43,5 @@ Linux
```
* fbgemm.dll error (Windows specific)
```
copypaste libomp140.x86_64.dll to C:\Windows\System32
copypaste tests\libomp140.x86_64.dll to C:\Windows\System32
```
+13 -12
View File
@@ -1,4 +1,4 @@
import os
from os import path
from dto.annotationClass import AnnotationClass
azaion = '/azaion'
@@ -6,21 +6,22 @@ prefix = 'azaion-'
images = 'images'
labels = 'labels'
data_dir = os.path.join(azaion, 'data')
data_images_dir = os.path.join(data_dir, images)
data_labels_dir = os.path.join(data_dir, labels)
data_dir = path.join(azaion, 'data')
data_images_dir = path.join(data_dir, images)
data_labels_dir = path.join(data_dir, labels)
processed_dir = os.path.join(azaion, 'data-processed')
processed_images_dir = os.path.join(processed_dir, images)
processed_labels_dir = os.path.join(processed_dir, labels)
processed_dir = path.join(azaion, 'data-processed')
processed_images_dir = path.join(processed_dir, images)
processed_labels_dir = path.join(processed_dir, labels)
corrupted_dir = os.path.join(azaion, 'data-corrupted')
corrupted_images_dir = os.path.join(corrupted_dir, images)
corrupted_labels_dir = os.path.join(corrupted_dir, labels)
corrupted_dir = path.join(azaion, 'data-corrupted')
corrupted_images_dir = path.join(corrupted_dir, images)
corrupted_labels_dir = path.join(corrupted_dir, labels)
sample_dir = path.join(azaion, 'data-sample')
datasets_dir = os.path.join(azaion, 'datasets')
models_dir = os.path.join(azaion, 'models')
datasets_dir = path.join(azaion, 'datasets')
models_dir = path.join(azaion, 'models')
annotation_classes = AnnotationClass.read_json()
date_format = '%Y-%m-%d'
+3
View File
@@ -29,5 +29,8 @@ pip install rknn_toolkit2-1.6.0+81f21f4d-cp311-cp311-linux_x86_64.whl
pip install "numpy<2.0"
cd ../../../
git clone https://github.com/airockchip/rknn_model_zoo.git
sed -i -E "s#(DATASET_PATH = ').+(')#\1/azaion/data-sample/azaion_subset.txt\2#" rknn_model_zoo/examples/yolov8/python/convert.py
conda deactivate
conda deactivate
+4 -5
View File
@@ -1,4 +1,4 @@
# Use converter PT to ONNX
# PT to ONNX
cd rknn-convert/ultralytics_yolov8/
cp --verbose /azaion/models/azaion.pt .
source env/bin/activate
@@ -8,13 +8,12 @@ python ./ultralytics/engine/exporter.py
cp --verbose azaion.onnx ../
cd ..
deactivate
cp --verbose azaion.onnx /azaion/models/
# Use converter ONNX to RKNN
# ONNX to RKNN
source ~/miniconda/bin/activate
conda activate toolkit2
cd rknn_model_zoo/examples/yolov8/python
python convert.py ../../../../azaion.onnx rk3588 i8
cp --verbose ../model/yolov8.rknn /azaion/models/azaion.rknn
python convert.py ../../../../azaion.onnx rk3588 i8 /azaion/models/azaion.rknn
conda deactivate
conda deactivate
+5
View File
@@ -25,6 +25,11 @@ mkdir data-processed
chown -R azaionsftp:azaionsftp data-processed
mount -o bind /azaion/data-processed data-processed
chown -R zxsanny:sftp /azaion-media/nogps-flights
mkdir nogps-flights
chown -R azaionsftp:azaionsftp nogps-flights
mount -o bind /azaion-media/nogps-flights nogps-flights
chmod -R 755 /home/azaionsftp/
+86
View File
@@ -0,0 +1,86 @@
names:
- Armored-Vehicle
- Truck
- Vehicle
- Artillery
- Shadow
- Trenches
- Military-men
- Tyre-tracks
- Additional-armored-tank
- Smoke
- Class-11
- Class-12
- Class-13
- Class-14
- Class-15
- Class-16
- Class-17
- Class-18
- Class-19
- Class-20
- Class-21
- Class-22
- Class-23
- Class-24
- Class-25
- Class-26
- Class-27
- Class-28
- Class-29
- Class-30
- Class-31
- Class-32
- Class-33
- Class-34
- Class-35
- Class-36
- Class-37
- Class-38
- Class-39
- Class-40
- Class-41
- Class-42
- Class-43
- Class-44
- Class-45
- Class-46
- Class-47
- Class-48
- Class-49
- Class-50
- Class-51
- Class-52
- Class-53
- Class-54
- Class-55
- Class-56
- Class-57
- Class-58
- Class-59
- Class-60
- Class-61
- Class-62
- Class-63
- Class-64
- Class-65
- Class-66
- Class-67
- Class-68
- Class-69
- Class-70
- Class-71
- Class-72
- Class-73
- Class-74
- Class-75
- Class-76
- Class-77
- Class-78
- Class-79
- Class-80
nc: 80
test: test/images
train: train/images
val: valid/images
-5
View File
@@ -1,5 +0,0 @@
import onnx
from ultralytics import YOLO
model = YOLO('azaion-2024-08-13.pt')
model.export(format='rknn')
+6
View File
@@ -0,0 +1,6 @@
from abc import ABC, abstractmethod
class Predictor(ABC):
@abstractmethod
def predict(self, frame):
pass
+5 -5
View File
@@ -5,14 +5,16 @@ from ultralytics import YOLO
import cv2
from time import sleep
model = YOLO('azaion-2024-08-13.pt')
from yolo_predictor import YOLOPredictor
# video_url = 'https://www.youtube.com/watch?v=d1n2fDOSo8c'
# stream = CamGear(source=video_url, stream_mode=True, logging=True).start()
predictor = YOLOPredictor()
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
input_name = sys.argv[1]
input_name = 'ForAI.mp4'
output_name = Path(input_name).stem + '_recognised.mp4'
v_input = cv2.VideoCapture(input_name)
@@ -23,9 +25,7 @@ while v_input.isOpened():
if frame is None:
break
results = model.track(frame, persist=True, tracker='bytetrack.yaml')
frame_detected = results[0].plot()
frame_detected = predictor.predict(frame)
frame_detected = cv2.resize(frame_detected, (640, 480))
cv2.imshow('Video', frame_detected)
sleep(0.01)
+20
View File
@@ -0,0 +1,20 @@
import cv2
import numpy as np
import yaml
from predictor import Predictor
from ultralytics import YOLO
class YOLOPredictor(Predictor):
def __init__(self):
self.model = YOLO('/azaion/models/azaion.onnx')
self.model.task = 'detect'
with open('data.yaml', 'r') as f:
data_yaml = yaml.safe_load(f)
class_names = data_yaml['names']
names = self.model.names
def predict(self, frame):
results = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
return results[0].plot()
+62 -18
View File
@@ -1,10 +1,10 @@
import os
import random
import subprocess
from os import path, replace, remove, listdir, makedirs, scandir
from os.path import abspath
import shutil
import subprocess
from datetime import datetime
from os import path, replace, listdir, makedirs, scandir
from os.path import abspath
from pathlib import Path
from ultralytics import YOLO
from constants import (processed_images_dir,
@@ -12,13 +12,14 @@ from constants import (processed_images_dir,
annotation_classes,
prefix, date_format,
datasets_dir, models_dir,
corrupted_images_dir, corrupted_labels_dir)
corrupted_images_dir, corrupted_labels_dir, sample_dir)
today_folder = f'{prefix}{datetime.now():{date_format}}'
today_dataset = path.join(datasets_dir, today_folder)
train_set = 70
valid_set = 20
test_set = 10
old_images_percentage = 75
DEFAULT_CLASS_NUM = 80
@@ -26,32 +27,33 @@ DEFAULT_CLASS_NUM = 80
def form_dataset(from_date: datetime):
makedirs(today_dataset, exist_ok=True)
images = []
old_images = []
with scandir(processed_images_dir) as imd:
for image_file in imd:
if not image_file.is_file():
continue
mod_time = datetime.fromtimestamp(image_file.stat().st_mtime)
mod_time = datetime.fromtimestamp(image_file.stat().st_mtime).replace(hour=0, minute=0, second=0, microsecond=0)
if from_date is None:
images.append(image_file)
elif mod_time > from_date:
images.append(image_file)
else: # gather old images as well in order to avoid overfitting on the only new data.
old_images.append(image_file)
random.shuffle(old_images)
old_images_size = int(len(old_images) * old_images_percentage / 100.0)
print(f'Got {len(images)} new images and {old_images_size} of old images (to prevent overfitting). Shuffling them...')
images.extend(old_images[:old_images_size])
print('shuffling images')
random.shuffle(images)
train_size = int(len(images) * train_set / 100.0)
valid_size = int(len(images) * valid_set / 100.0)
print(f'copy train dataset, size: {train_size} annotations')
copy_annotations(images[:train_size], 'train')
print(f'copy valid set, size: {valid_size} annotations')
copy_annotations(images[train_size:train_size + valid_size], 'valid')
print(f'copy test set, size: {len(images) - train_size - valid_size} annotations')
copy_annotations(images[train_size + valid_size:], 'test')
print('creating yaml...')
create_yaml()
@@ -65,6 +67,8 @@ def copy_annotations(images, folder):
makedirs(corrupted_images_dir, exist_ok=True)
makedirs(corrupted_labels_dir, exist_ok=True)
copied = 0
print(f'Copying annotations to {destination_images} and {destination_labels} folders:')
for image in images:
label_name = f'{Path(image.path).stem}.txt'
label_path = path.join(processed_labels_dir, label_name)
@@ -75,6 +79,10 @@ def copy_annotations(images, folder):
shutil.copy(image.path, path.join(corrupted_images_dir, image.name))
shutil.copy(label_path, path.join(corrupted_labels_dir, label_name))
print(f'Label {label_path} is corrupted! Copy with its image to the corrupted directory ({corrupted_labels_dir})')
copied = copied + 1
if copied % 1000 == 0:
print(f'{copied} copied...')
print(f'Copied all {copied} annotations to {destination_images} and {destination_labels} folders')
def check_label(label_path):
@@ -90,6 +98,7 @@ def check_label(label_path):
def create_yaml():
print('creating yaml...')
lines = ['names:']
for c in annotation_classes:
lines.append(f'- {annotation_classes[c].name}')
@@ -136,8 +145,7 @@ def get_latest_model():
last_model = sorted_dates[-1]
return last_model['date'], last_model['path']
def train_dataset(existing_date=None):
def train_dataset(existing_date=None, from_scratch=False):
latest_date, latest_model = get_latest_model()
if existing_date is not None:
@@ -148,12 +156,17 @@ def train_dataset(existing_date=None):
cur_folder = today_folder
cur_dataset = today_dataset
model_name = latest_model if latest_model is not None and path.isfile(latest_model) else 'yolov8m.yaml'
model_name = latest_model if latest_model is not None and path.isfile(latest_model) and not from_scratch else 'yolov8m.yaml'
print(f'Initial model: {model_name}')
model = YOLO(model_name)
yaml = abspath(path.join(cur_dataset, 'data.yaml'))
results = model.train(data=yaml, epochs=100, batch=57, imgsz=640, save_period=1)
results = model.train(data=yaml,
epochs=120,
batch=14,
imgsz=1280,
save_period=1,
workers=24)
model_dir = path.join(models_dir, cur_folder)
shutil.copytree(results.save_dir, model_dir)
@@ -164,9 +177,40 @@ def train_dataset(existing_date=None):
def convert2rknn():
subprocess.call(['bash', 'convert.sh'], cwd="./orangepi5")
latest_date, latest_model = get_latest_model()
model = YOLO(latest_model)
model.export(format="onnx")
pass
def form_data_sample(size=300):
images = []
with scandir(processed_images_dir) as imd:
for image_file in imd:
if not image_file.is_file():
continue
images.append(image_file)
print('shuffling images')
random.shuffle(images)
images = images[:size]
shutil.rmtree(sample_dir, ignore_errors=True)
makedirs(sample_dir, exist_ok=True)
lines = []
for image in images:
shutil.copy(image.path, path.join(sample_dir, image.name))
lines.append(f'./{image.name}')
with open(path.join(sample_dir, 'azaion_subset.txt'), 'w', encoding='utf-8') as f:
f.writelines([f'{line}\n' for line in lines])
def validate(model_path):
model = YOLO(model_path)
metrics = model.val()
pass
if __name__ == '__main__':
train_dataset()
train_dataset('2024-10-26', from_scratch=True)
validate(path.join('runs', 'detect', 'train7', 'weights', 'best.pt'))
form_data_sample(500)
convert2rknn()