mirror of
https://github.com/azaion/ai-training.git
synced 2026-04-22 09:16:36 +00:00
upload model to cdn and api
switch to yolov11
This commit is contained in:
@@ -25,16 +25,9 @@ Linux
|
||||
pip install virtualenv
|
||||
python -m venv env
|
||||
env\Scripts\activate
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
```
|
||||
python -m pip install --upgrade pip
|
||||
pip install --upgrade huggingface_hub
|
||||
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
|
||||
pip install git+https://github.com/airockchip/ultralytics_yolov8.git
|
||||
pip install albumentations
|
||||
```
|
||||
|
||||
**3. Fix possible problems**
|
||||
* cv2.error: OpenCV(4.10.0) ...\window.cpp:1301: error: (-2:Unspecified error)
|
||||
```
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
import io
|
||||
import requests
|
||||
|
||||
|
||||
class ApiCredentials:
    """Connection settings for the Azaion REST API.

    Attributes:
        url: base URL of the API (no trailing slash).
        email: account e-mail used for /login.
        password: account password used for /login.
        folder: target folder (resource path segment) for uploads.
    """

    def __init__(self, url, email, password, folder):
        self.url, self.email = url, email
        self.password, self.folder = password, folder
||||
class Api:
    """Minimal client for the Azaion REST API: token login plus file upload."""

    def __init__(self, credentials):
        # Token is obtained lazily on the first upload_file() call.
        self.token = None
        self.credentials = credentials

    def login(self):
        """Authenticate against /login and cache the bearer token.

        Raises:
            requests.HTTPError: if the API rejects the credentials.
        """
        response = requests.post(
            f'{self.credentials.url}/login',
            json={"email": self.credentials.email, "password": self.credentials.password},
            timeout=30)  # FIX: avoid hanging forever on an unresponsive API
        response.raise_for_status()
        self.token = response.json()["token"]

    def upload_file(self, filename: str, file_bytes: bytearray):
        """Upload *file_bytes* as *filename* into the configured folder.

        Logs in first when no token is cached yet. Failures are logged and
        swallowed (best-effort upload, matching CDNManager's behaviour).
        """
        folder = self.credentials.folder
        if self.token is None:
            self.login()
        url = f"{self.credentials.url}/resources/{folder}"
        headers = {"Authorization": f"Bearer {self.token}"}
        files = {'data': (filename, io.BytesIO(file_bytes))}
        try:
            r = requests.post(url, headers=headers, files=files,
                              allow_redirects=True, timeout=120)
            r.raise_for_status()
            # FIX: log the actual filename (the placeholder was garbled in the
            # original message).
            print(f"Upload {len(file_bytes)} bytes ({filename}) to {self.credentials.url}. Result: {r.status_code}")
        except Exception as e:
            print(f"Upload fail: {e}")
||||
@@ -0,0 +1,34 @@
|
||||
import io
|
||||
import boto3
|
||||
|
||||
|
||||
class CDNCredentials:
    """Access settings for the S3-compatible CDN endpoint.

    Attributes:
        host: endpoint URL of the object store.
        access_key: S3 access key id.
        secret_key: S3 secret access key.
    """

    def __init__(self, host, access_key, secret_key):
        self.host = host
        self.access_key = access_key
        self.secret_key = secret_key
||||
class CDNManager:
    """Thin wrapper around an S3/MinIO client for CDN uploads and downloads."""

    def __init__(self, credentials: CDNCredentials):
        self.creds = credentials
        self.minio_client = boto3.client(
            's3',
            endpoint_url=self.creds.host,
            aws_access_key_id=self.creds.access_key,
            aws_secret_access_key=self.creds.secret_key)

    def upload(self, bucket: str, filename: str, file_bytes: bytearray) -> bool:
        """Upload *file_bytes* to *bucket* under the key *filename*.

        Returns True on success; logs the error and returns False on failure
        (best-effort, mirroring Api.upload_file).
        """
        try:
            self.minio_client.upload_fileobj(io.BytesIO(file_bytes), bucket, filename)
            # FIX: include the object key in the log line so uploads are
            # traceable (the placeholder was garbled in the original message).
            print(f'Uploaded {len(file_bytes)} bytes to {self.creds.host}/{bucket}/{filename}')
            return True
        except Exception as e:
            print(e)
            return False

    def download(self, bucket: str, filename: str) -> bool:
        """Download *filename* from *bucket* into the current working
        directory under the same name. Returns True on success."""
        try:
            self.minio_client.download_file(bucket, filename, filename)
            return True
        except Exception as e:
            print(e)
            return False
||||
+11
@@ -0,0 +1,11 @@
|
||||
# SECURITY (review): live access keys and an admin password are committed to
# version control in plaintext. Rotate these credentials and load them from
# environment variables or a secret store instead of this file.
cdn:
  host: 'https://cdnapi.azaion.com'
  access_key: '8gx5HWhLXD1sEZgQjxam'
  secret_key: 'KHjr6xmveqgKa7UibY9kEQUQ7VhjT8yfmG1fP0tV'
  bucket: 'models'

api:
  url: 'https://api.azaion.com'
  user: 'admin@azaion.com'
  pw: 'Az@1on1000Odm$n'
  folder: ''
|
||||
@@ -27,3 +27,5 @@ annotation_classes = AnnotationClass.read_json()
|
||||
date_format = '%Y-%m-%d'
|
||||
checkpoint_file = 'checkpoint.txt'
|
||||
checkpoint_date_format = '%Y-%m-%d %H:%M:%S'
|
||||
|
||||
CONFIG_FILE = "config.yaml"
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
huggingface_hub
|
||||
torch
|
||||
torchvision
|
||||
torchaudio
|
||||
ultralytics
|
||||
albumentations~=2.0.4
|
||||
|
||||
opencv-python~=4.11.0.86
|
||||
matplotlib~=3.10.0
|
||||
PyYAML~=6.0.2
cryptography~=44.0.1
numpy~=2.1.1
requests~=2.32.3
# (duplicate unpinned 'pyyaml' entry removed — PyYAML is already pinned above)
|
||||
+58
@@ -0,0 +1,58 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import padding
|
||||
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
|
||||
|
||||
BUFFER_SIZE = 64 * 1024 # 64 KB
|
||||
|
||||
|
||||
class Security:
    """AES-256-CBC encryption helpers plus hashing utilities for model files."""

    @staticmethod
    def encrypt_to(input_bytes, key):
        """Encrypt *input_bytes* (bytes) using a key derived from *key* (str).

        The AES key is the SHA-256 digest of the passphrase (i.e. AES-256).
        A fresh random 16-byte IV is generated per call and prepended to the
        returned ciphertext so decrypt_to can recover it.
        """
        aes_key = hashlib.sha256(key.encode('utf-8')).digest()
        iv = os.urandom(16)

        cipher = Cipher(algorithms.AES(aes_key), modes.CBC(iv), backend=default_backend())
        encryptor = cipher.encryptor()
        padder = padding.PKCS7(128).padder()

        padded_plaintext = padder.update(input_bytes) + padder.finalize()
        ciphertext = encryptor.update(padded_plaintext) + encryptor.finalize()

        return iv + ciphertext

    @staticmethod
    def decrypt_to(ciphertext_with_iv_bytes, key):
        """Inverse of encrypt_to: return the decrypted plaintext bytes.

        Raises:
            ValueError: if the PKCS7 padding is invalid (wrong key or
                tampered/corrupted ciphertext).
        """
        aes_key = hashlib.sha256(key.encode('utf-8')).digest()
        iv = ciphertext_with_iv_bytes[:16]
        ciphertext_bytes = ciphertext_with_iv_bytes[16:]

        cipher = Cipher(algorithms.AES(aes_key), modes.CBC(iv), backend=default_backend())
        decryptor = cipher.decryptor()
        decrypted_padded_bytes = decryptor.update(ciphertext_bytes) + decryptor.finalize()

        # BUG FIX: the original stripped padding with a heuristic (trust the
        # last byte whenever it is 1..16), which silently truncates any
        # plaintext whose final byte happens to fall in that range and never
        # validates the padding bytes. Use the library unpadder, which checks
        # every padding byte and raises on invalid input.
        unpadder = padding.PKCS7(128).unpadder()
        return unpadder.update(decrypted_padded_bytes) + unpadder.finalize()

    @staticmethod
    def get_model_encryption_key():
        """Return the derived passphrase used to encrypt shipped models.

        NOTE(review): the secret is hard-coded in source, so anyone with the
        repository (or the shipped artifact) can decrypt the models — move it
        to a secret store or environment variable.
        """
        key = '-#%@AzaionKey@%#---234sdfklgvhjbnn'
        return Security.calc_hash(key)

    @staticmethod
    def calc_hash(key):
        """Return the base64-encoded SHA-384 digest of *key* (str)."""
        hash_bytes = hashlib.sha384(key.encode('utf-8')).digest()
        return base64.b64encode(hash_bytes).decode('utf-8')
||||
@@ -1,6 +1,33 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from ultralytics import YOLO
|
||||
import yaml
|
||||
|
||||
|
||||
class Predictor(ABC):
    """Interface for frame-level detectors: turn a video frame into an
    annotated frame."""

    @abstractmethod
    def predict(self, frame):
        """Return an annotated copy of *frame*."""
||||
class OnnxPredictor(Predictor):
    """Predictor backed by the exported ONNX model ('azaion.onnx')."""

    def __init__(self):
        self.model = YOLO('azaion.onnx')
        # ONNX weights carry no task metadata, so set it explicitly.
        self.model.task = 'detect'
        # BUG FIX: the original parsed data.yaml and read model.names into
        # locals that were immediately discarded; keep them on the instance
        # so callers can map class ids to labels.
        with open('data.yaml', 'r') as f:
            data_yaml = yaml.safe_load(f)
        self.class_names = data_yaml['names']
        self.model_names = self.model.names

    def predict(self, frame):
        """Run detection + ByteTrack tracking on *frame*; return the
        annotated frame."""
        results = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return results[0].plot()
|
||||
class YoloPredictor(Predictor):
    """Predictor backed by the native PyTorch YOLO weights ('azaion.pt')."""

    def __init__(self):
        self.model = YOLO('azaion.pt')

    def predict(self, frame):
        """Run detection + ByteTrack tracking on *frame*; return the
        annotated frame."""
        tracked = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return tracked[0].plot()
||||
@@ -0,0 +1,10 @@
|
||||
from security import Security

# Round-trip smoke test for the Security encrypt/decrypt helpers.
key = Security.get_model_encryption_key()
test_str = 'test test test 123'

# BUG FIX: encrypt_to/decrypt_to operate on bytes — the original wrapped the
# payload in io.BytesIO, which the PKCS7 padder rejects (TypeError), so the
# script crashed before printing anything.
test_encrypted = Security.encrypt_to(test_str.encode('utf-8'), key)
test_res = Security.decrypt_to(test_encrypted, key).decode('utf-8')
print(f'Initial: {test_str}')
print(f'Result : {test_res}')
||||
@@ -5,20 +5,23 @@ from ultralytics import YOLO
|
||||
import cv2
|
||||
from time import sleep
|
||||
|
||||
from yolo_predictor import YOLOPredictor
|
||||
from predictor import OnnxPredictor, YoloPredictor
|
||||
|
||||
# video_url = 'https://www.youtube.com/watch?v=d1n2fDOSo8c'
|
||||
# stream = CamGear(source=video_url, stream_mode=True, logging=True).start()
|
||||
write_output = False
|
||||
|
||||
predictor = YOLOPredictor()
|
||||
predictor = YoloPredictor()
|
||||
|
||||
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
|
||||
|
||||
input_name = 'ForAI.mp4'
|
||||
input_name = 'ForAI_test.mp4'
|
||||
output_name = Path(input_name).stem + '_recognised.mp4'
|
||||
|
||||
v_input = cv2.VideoCapture(input_name)
|
||||
v_output = cv2.VideoWriter(output_name, fourcc, 20.0, (640, 480))
|
||||
|
||||
if write_output:
|
||||
v_output = cv2.VideoWriter(output_name, fourcc, 20.0, (640, 480))
|
||||
|
||||
while v_input.isOpened():
|
||||
ret, frame = v_input.read()
|
||||
@@ -30,10 +33,12 @@ while v_input.isOpened():
|
||||
cv2.imshow('Video', frame_detected)
|
||||
sleep(0.01)
|
||||
|
||||
if write_output:
|
||||
v_output.write(frame_detected)
|
||||
if cv2.waitKey(1) & 0xFF == ord('q'):
|
||||
break
|
||||
|
||||
v_input.release()
|
||||
v_output.release()
|
||||
if write_output:
|
||||
v_output.release()
|
||||
cv2.destroyAllWindows()
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import yaml
|
||||
|
||||
from predictor import Predictor
|
||||
from ultralytics import YOLO
|
||||
|
||||
class YOLOPredictor(Predictor):
    """Legacy ONNX predictor using an absolute model path (superseded by
    predictor.OnnxPredictor)."""

    def __init__(self):
        self.model = YOLO('/azaion/models/azaion.onnx')
        self.model.task = 'detect'
        with open('data.yaml', 'r') as f:
            data_yaml = yaml.safe_load(f)
        # NOTE(review): both locals below are unused and discarded; kept only
        # to preserve the original behaviour (the attribute read does execute).
        class_names = data_yaml['names']

        names = self.model.names

    def predict(self, frame):
        """Annotated frame with tracked detections drawn onto it."""
        tracked = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return tracked[0].plot()
||||
@@ -1,3 +1,4 @@
|
||||
import io
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
@@ -6,7 +7,15 @@ from datetime import datetime
|
||||
from os import path, replace, listdir, makedirs, scandir
|
||||
from os.path import abspath
|
||||
from pathlib import Path
|
||||
from utils import Dotdict
|
||||
|
||||
import yaml
|
||||
from ultralytics import YOLO
|
||||
|
||||
import constants
|
||||
from azaion_api import ApiCredentials, Api
|
||||
from cdn_manager import CDNCredentials, CDNManager
|
||||
from security import Security
|
||||
from constants import (processed_images_dir,
|
||||
processed_labels_dir,
|
||||
annotation_classes,
|
||||
@@ -145,6 +154,7 @@ def get_latest_model():
|
||||
last_model = sorted_dates[-1]
|
||||
return last_model['date'], last_model['path']
|
||||
|
||||
|
||||
def train_dataset(existing_date=None, from_scratch=False):
|
||||
latest_date, latest_model = get_latest_model()
|
||||
|
||||
@@ -156,7 +166,7 @@ def train_dataset(existing_date=None, from_scratch=False):
|
||||
cur_folder = today_folder
|
||||
cur_dataset = today_dataset
|
||||
|
||||
model_name = latest_model if latest_model is not None and path.isfile(latest_model) and not from_scratch else 'yolov8m.yaml'
|
||||
model_name = latest_model if latest_model is not None and path.isfile(latest_model) and not from_scratch else 'yolo11m.yaml'
|
||||
print(f'Initial model: {model_name}')
|
||||
model = YOLO(model_name)
|
||||
|
||||
@@ -171,8 +181,10 @@ def train_dataset(existing_date=None, from_scratch=False):
|
||||
model_dir = path.join(models_dir, cur_folder)
|
||||
shutil.copytree(results.save_dir, model_dir)
|
||||
|
||||
shutil.copy(path.join(model_dir, 'weights', 'best.pt'), path.join(models_dir, f'{prefix[:-1]}.pt'))
|
||||
model_path = path.join(models_dir, f'{prefix[:-1]}.pt')
|
||||
shutil.copy(path.join(model_dir, 'weights', 'best.pt'), model_path)
|
||||
shutil.rmtree('runs')
|
||||
return model_path
|
||||
|
||||
|
||||
def convert2rknn():
|
||||
@@ -209,8 +221,38 @@ def validate(model_path):
|
||||
metrics = model.val()
|
||||
pass
|
||||
|
||||
|
||||
def upload_model(model_path: str):
    """Encrypt the exported ONNX model next to *model_path* and publish it.

    The encrypted blob is split in two: a small head (<= 10 KiB) uploaded via
    the REST API and the large remainder pushed to the CDN bucket, so neither
    store alone holds a usable model.
    """
    # model = YOLO(model_path)
    # model.export(format="onnx", imgsz=1280, nms=True, batch=4)

    # BUG FIX: the original built the path as dirname + stem + '.onnx' with no
    # directory separator, producing e.g. 'modelsazaion.onnx'. Build the
    # sibling path properly instead.
    onnx_model = str(Path(model_path).with_suffix('.onnx'))

    with open(onnx_model, 'rb') as f_in:
        onnx_bytes = f_in.read()

    key = Security.get_model_encryption_key()
    onnx_encrypted = Security.encrypt_to(onnx_bytes, key)

    # Head is at most 10 KiB and never more than 90% of the payload.
    part1_size = min(10 * 1024, int(0.9 * len(onnx_encrypted)))
    onnx_part_small = onnx_encrypted[:part1_size]
    onnx_part_big = onnx_encrypted[part1_size:]

    with open(constants.CONFIG_FILE, "r") as f:
        config_dict = yaml.safe_load(f)
    d_config = Dotdict(config_dict)
    cdn_c = Dotdict(d_config.cdn)
    api_c = Dotdict(d_config.api)

    cdn_manager = CDNManager(CDNCredentials(cdn_c.host, cdn_c.access_key, cdn_c.secret_key))
    cdn_manager.upload(cdn_c.bucket, 'azaion.onnx.big', onnx_part_big)

    api = Api(ApiCredentials(api_c.url, api_c.user, api_c.pw, api_c.folder))
    api.upload_file('azaion.onnx.small', onnx_part_small)
||||
if __name__ == '__main__':
    # Training/validation/export pipeline, kept for manual runs:
    # model_path = train_dataset('2024-10-26', from_scratch=True)
    # validate(path.join('runs', 'detect', 'train7', 'weights', 'best.pt'))
    # form_data_sample(500)
    # convert2rknn()

    # Current entry point: publish the already-trained model.
    model_path = 'azaion.pt'
    upload_model(model_path)
||||
@@ -0,0 +1,5 @@
|
||||
class Dotdict(dict):
    """dict subclass whose entries can be read, written and deleted as
    attributes (d.key instead of d['key'])."""

    def __getattr__(self, key, default=None):
        # Missing keys read as None (dict.get semantics), not AttributeError.
        return dict.get(self, key, default)

    def __setattr__(self, key, value):
        dict.__setitem__(self, key, value)

    def __delattr__(self, key):
        dict.__delitem__(self, key)
||||
-46
@@ -1,46 +0,0 @@
|
||||
# Ultralytics YOLO 🚀, AGPL-3.0 license
|
||||
# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
|
||||
|
||||
# Parameters
|
||||
nc: 80 # number of classes
|
||||
scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
|
||||
# [depth, width, max_channels]
|
||||
n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
|
||||
s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
|
||||
m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
|
||||
l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
|
||||
x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
|
||||
|
||||
# YOLOv8.0n backbone
|
||||
backbone:
|
||||
# [from, repeats, module, args]
|
||||
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
|
||||
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
|
||||
- [-1, 3, C2f, [128, True]]
|
||||
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
|
||||
- [-1, 6, C2f, [256, True]]
|
||||
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
|
||||
- [-1, 6, C2f, [512, True]]
|
||||
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
|
||||
- [-1, 3, C2f, [1024, True]]
|
||||
- [-1, 1, SPPF, [1024, 5]] # 9
|
||||
|
||||
# YOLOv8.0n head
|
||||
head:
|
||||
- [-1, 1, nn.Upsample, [None, 2, 'nearest']]
|
||||
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
|
||||
- [-1, 3, C2f, [512]] # 12
|
||||
|
||||
- [-1, 1, nn.Upsample, [None, 2, 'nearest']]
|
||||
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
|
||||
- [-1, 3, C2f, [256]] # 15 (P3/8-small)
|
||||
|
||||
- [-1, 1, Conv, [256, 3, 2]]
|
||||
- [[-1, 12], 1, Concat, [1]] # cat head P4
|
||||
- [-1, 3, C2f, [512]] # 18 (P4/16-medium)
|
||||
|
||||
- [-1, 1, Conv, [512, 3, 2]]
|
||||
- [[-1, 9], 1, Concat, [1]] # cat head P5
|
||||
- [-1, 3, C2f, [1024]] # 21 (P5/32-large)
|
||||
|
||||
- [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
|
||||
Reference in New Issue
Block a user