Add RKNN conversion: install and use scripts; automatically convert to RKNN after AI training finishes and place the .pt and .rknn models in the /azaion/models directory

This commit is contained in:
zxsanny
2024-10-03 11:41:22 +03:00
parent c234e8b190
commit 31c44943e8
8 changed files with 122 additions and 33 deletions
+4 -1
View File
@@ -4,4 +4,7 @@
datasets/ datasets/
runs/ runs/
models/ models/
*.pt *.pt
*.onnx
*.rknn
*.mp4
+1 -1
View File
@@ -32,7 +32,7 @@ Linux
pip install --upgrade huggingface_hub pip install --upgrade huggingface_hub
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121 pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install git+https://github.com/airockchip/ultralytics_yolov8.git pip install git+https://github.com/airockchip/ultralytics_yolov8.git
pip install albumentations onnx pip install albumentations
``` ```
**3. Fix possible problems** **3. Fix possible problems**
+33
View File
@@ -0,0 +1,33 @@
# Bootstrap a two-stage model conversion toolchain (PT -> ONNX -> RKNN)
# inside a fresh ./rknn-convert working directory.
mkdir rknn-convert
cd rknn-convert
# Install converter PT to ONNX
# airockchip's ultralytics fork exports YOLOv8 in an RKNN-friendly layout.
git clone https://github.com/airockchip/ultralytics_yolov8
cd ultralytics_yolov8
sudo apt install python3.12-venv
# Isolate the exporter's dependencies in a local virtualenv.
python3 -m venv env
source env/bin/activate
pip install .
pip install onnx
# Back up the stock config, then point the default model at azaion.pt so the
# exporter picks it up without CLI arguments (relied on by the convert script).
cp ultralytics/cfg/default.yaml ultralytics/cfg/default_backup.yaml
sed -i -E "s/(model: ).+( #.+)/\1azaion.pt\2/" ultralytics/cfg/default.yaml
cd ..
deactivate
# Install converter ONNX to RKNN
# rknn-toolkit2 1.6.0 targets Python 3.11, so it gets its own conda env.
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
chmod +x miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
source ~/miniconda/bin/activate
conda create -n toolkit2 -y python=3.11
conda activate toolkit2
git clone https://github.com/rockchip-linux/rknn-toolkit2.git
cd rknn-toolkit2/rknn-toolkit2/packages
pip install -r requirements_cp311-1.6.0.txt
pip install rknn_toolkit2-1.6.0+81f21f4d-cp311-cp311-linux_x86_64.whl
# Pin numpy below 2.0 — presumably toolkit2 1.6.0 breaks on numpy 2.x; confirm.
pip install "numpy<2.0"
cd ../../../
# Model zoo supplies the yolov8 convert.py driver used at conversion time.
git clone https://github.com/airockchip/rknn_model_zoo.git
# Leave the toolkit2 env, then the base conda env.
conda deactivate
conda deactivate
+20
View File
@@ -0,0 +1,20 @@
# Convert the trained PyTorch model to RKNN in two stages:
#   /azaion/models/azaion.pt -> azaion.onnx -> /azaion/models/azaion.rknn
# Assumes the toolchain was previously set up under ./rknn-convert by the
# install script.
# Use converter PT to ONNX
cd rknn-convert/ultralytics_yolov8/
cp --verbose /azaion/models/azaion.pt .
source env/bin/activate
pip install onnx
export PYTHONPATH=./
# default.yaml was patched at install time so the exporter loads azaion.pt.
python ./ultralytics/engine/exporter.py
cp --verbose azaion.onnx ../
cd ..
deactivate
# Use converter ONNX to RKNN
source ~/miniconda/bin/activate
conda activate toolkit2
cd rknn_model_zoo/examples/yolov8/python
# Arguments: model path, target platform (rk3588), int8 quantization.
python convert.py ../../../../azaion.onnx rk3588 i8
cp --verbose ../model/yolov8.rknn /azaion/models/azaion.rknn
# Leave the toolkit2 env, then the base conda env.
conda deactivate
conda deactivate
+5
View File
@@ -0,0 +1,5 @@
1. Download latest release from here https://joshua-riek.github.io/ubuntu-rockchip-download/boards/orangepi-5.html
f.e. https://github.com/Joshua-Riek/ubuntu-rockchip/releases/download/v2.3.2/ubuntu-22.04-preinstalled-desktop-arm64-orangepi-5.img.xz
but check for a more recent Ubuntu 22.04 build
2. Write the image to the microsd using https://bztsrc.gitlab.io/usbimager/ (sudo ./usbimager on linux) (or use BalenaEtcher)
+6
View File
@@ -0,0 +1,6 @@
# Pipeline entry point: preprocess annotations once, train on a dataset
# snapshot, then convert the resulting model to RKNN via the shell toolchain.
from preprocessing import preprocess_annotations
from train import train_dataset, convert2rknn
preprocess_annotations()
# NOTE(review): dataset date is hard-coded — confirm this snapshot is intended,
# or pass None to build a fresh dataset from the latest model date.
train_dataset('2024-10-01')
convert2rknn()
+26 -21
View File
@@ -116,30 +116,35 @@ def process_image(img_ann):
)) ))
def main(): def preprocess_annotations():
os.makedirs(processed_images_dir, exist_ok=True) os.makedirs(processed_images_dir, exist_ok=True)
os.makedirs(processed_labels_dir, exist_ok=True) os.makedirs(processed_labels_dir, exist_ok=True)
while True:
processed_images = set(f.name for f in os.scandir(processed_images_dir))
images = []
with os.scandir(data_images_dir) as imd:
for image_file in imd:
if image_file.is_file() and image_file.name not in processed_images:
images.append(image_file)
for image_file in images: processed_images = set(f.name for f in os.scandir(processed_images_dir))
try: images = []
image_path = os.path.join(data_images_dir, image_file.name) with os.scandir(data_images_dir) as imd:
labels_path = os.path.join(data_labels_dir, f'{Path(image_path).stem}.txt') for image_file in imd:
image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED) if image_file.is_file() and image_file.name not in processed_images:
process_image(ImageLabel( images.append(image_file)
image_path=image_path,
image=image, for image_file in images:
labels_path=labels_path, try:
labels=read_labels(labels_path) image_path = os.path.join(data_images_dir, image_file.name)
)) labels_path = os.path.join(data_labels_dir, f'{Path(image_path).stem}.txt')
except Exception as e: image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
print(f'Error appeared {e}') process_image(ImageLabel(
image_path=image_path,
image=image,
labels_path=labels_path,
labels=read_labels(labels_path)
))
except Exception as e:
print(f'Error appeared {e}')
def main():
while True:
preprocess_annotations()
print('All processed, waiting for 2 minutes...') print('All processed, waiting for 2 minutes...')
time.sleep(120) time.sleep(120)
+27 -10
View File
@@ -1,4 +1,6 @@
import os
import random import random
import subprocess
from os import path, replace, remove, listdir, makedirs, scandir from os import path, replace, remove, listdir, makedirs, scandir
from os.path import abspath from os.path import abspath
import shutil import shutil
@@ -127,7 +129,7 @@ def get_latest_model():
dir_model_path = path.join(models_dir, d, 'weights', 'best.pt') dir_model_path = path.join(models_dir, d, 'weights', 'best.pt')
return {'date': dir_date, 'path': dir_model_path} return {'date': dir_date, 'path': dir_model_path}
dates = [convert(d) for d in listdir(models_dir)] dates = [convert(d) for d in next(os.walk(models_dir))[1]]
sorted_dates = list(sorted(dates, key=lambda x: x['date'])) sorted_dates = list(sorted(dates, key=lambda x: x['date']))
if len(sorted_dates) == 0: if len(sorted_dates) == 0:
return None, None return None, None
@@ -135,21 +137,36 @@ def get_latest_model():
return last_model['date'], last_model['path'] return last_model['date'], last_model['path']
if __name__ == '__main__': def train_dataset(existing_date=None):
latest_date, latest_model = get_latest_model() latest_date, latest_model = get_latest_model()
# create_yaml()
# form_dataset(latest_date)
if existing_date is not None:
cur_folder = f'{prefix}{existing_date}'
cur_dataset = path.join(datasets_dir, f'{prefix}{existing_date}')
else:
form_dataset(latest_date)
cur_folder = today_folder
cur_dataset = today_dataset
model_name = latest_model if latest_model is not None and path.isfile(latest_model) else 'yolov8m.yaml' model_name = latest_model if latest_model is not None and path.isfile(latest_model) else 'yolov8m.yaml'
print(f'Initial model: {model_name}') print(f'Initial model: {model_name}')
model = YOLO(model_name) model = YOLO(model_name)
cur_folder = path.join(datasets_dir, f'{prefix}2024-09-19')
# cur_folder = today_dataset yaml = abspath(path.join(cur_dataset, 'data.yaml'))
yaml = abspath(path.join(cur_folder, 'data.yaml')) results = model.train(data=yaml, epochs=100, batch=57, imgsz=640, save_period=1)
results = model.train(data=yaml, epochs=100, batch=58, imgsz=640)
shutil.copytree(results.save_dir, path.join(models_dir, today_folder)) model_dir = path.join(models_dir, cur_folder)
shutil.copytree(results.save_dir, model_dir)
shutil.copy(path.join(model_dir, 'weights', 'best.pt'), path.join(models_dir, f'{prefix[:-1]}.pt'))
shutil.rmtree('runs') shutil.rmtree('runs')
def convert2rknn():
subprocess.call(['bash', 'convert.sh'], cwd="./orangepi5")
pass
if __name__ == '__main__':
train_dataset()
convert2rknn()