mirror of
https://github.com/azaion/ai-training.git
synced 2026-04-22 08:56:35 +00:00
Upload model to CDN and API
Switch to YOLOv11
This commit is contained in:
@@ -1,6 +1,33 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from ultralytics import YOLO
|
||||
import yaml
|
||||
|
||||
|
||||
class Predictor(ABC):
    """Common interface for frame predictors.

    A predictor consumes a single video frame and returns an annotated
    (rendered) frame ready for display or writing.
    """

    @abstractmethod
    def predict(self, frame):
        """Run inference on one frame and return the rendered result."""
        pass
|
||||
|
||||
|
||||
class OnnxPredictor(Predictor):
    """Predictor backed by the ONNX export of the model ('azaion.onnx').

    Class names are loaded from 'data.yaml' because an ONNX export does not
    reliably carry dataset metadata the way a .pt checkpoint does.
    """

    def __init__(self):
        self.model = YOLO('azaion.onnx')
        # ONNX exports do not embed the task type; set it explicitly so
        # ultralytics runs detection instead of guessing.
        self.model.task = 'detect'
        # Fix: the original parsed data.yaml into locals (`class_names`,
        # `names`) that were immediately discarded. Keep the names on the
        # instance so callers can map class ids to labels.
        with open('data.yaml', 'r') as f:
            data_yaml = yaml.safe_load(f)
        self.class_names = data_yaml['names']

    def predict(self, frame):
        """Track objects in *frame* with ByteTrack and return the plotted frame."""
        results = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return results[0].plot()
|
||||
|
||||
|
||||
class YoloPredictor(Predictor):
    """Predictor backed by the native PyTorch checkpoint ('azaion.pt')."""

    def __init__(self):
        # The .pt checkpoint carries its own task and metadata,
        # so no extra configuration is needed here.
        self.model = YOLO('azaion.pt')

    def predict(self, frame):
        """Track objects in *frame* with ByteTrack and return the plotted frame."""
        tracked = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return tracked[0].plot()
|
||||
|
||||
@@ -0,0 +1,10 @@
|
||||
import io

from security import Security


def _roundtrip_demo():
    """Smoke-test model encryption: encrypt a known string, decrypt it back,
    and print both so a human can eyeball the round trip."""
    key = Security.get_model_encryption_key()
    test_str = 'test test test 123'

    # encrypt_to / decrypt_to operate on file-like objects, so wrap the
    # payloads in BytesIO buffers.
    test_encrypted = Security.encrypt_to(io.BytesIO(test_str.encode('utf-8')), key)
    test_res = Security.decrypt_to(io.BytesIO(test_encrypted), key)
    print(f'Initial: {test_str}')
    print(f'Result : {test_res}')


# Fix: the original ran everything at import time; guard the entry point so
# importing this module has no side effects.
if __name__ == '__main__':
    _roundtrip_demo()
|
||||
@@ -5,20 +5,23 @@ from ultralytics import YOLO
|
||||
import cv2
from time import sleep

from predictor import OnnxPredictor, YoloPredictor

# NOTE(review): kept for reference — the stream-based input path.
# video_url = 'https://www.youtube.com/watch?v=d1n2fDOSo8c'
# stream = CamGear(source=video_url, stream_mode=True, logging=True).start()

# Toggle writing the annotated output video alongside on-screen display.
write_output = False

# Fix: removed diff residue — the stale `from yolo_predictor import
# YOLOPredictor` (that module is deleted in this commit), the superseded
# `predictor = YOLOPredictor()`, the old `input_name = 'ForAI.mp4'`, and the
# unconditional VideoWriter that duplicated the guarded one below.
predictor = YoloPredictor()

fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

input_name = 'ForAI_test.mp4'
output_name = Path(input_name).stem + '_recognised.mp4'

v_input = cv2.VideoCapture(input_name)

# Only allocate the writer when output is actually wanted.
if write_output:
    v_output = cv2.VideoWriter(output_name, fourcc, 20.0, (640, 480))
|
||||
|
||||
while v_input.isOpened():
|
||||
ret, frame = v_input.read()
|
||||
@@ -30,10 +33,12 @@ while v_input.isOpened():
|
||||
cv2.imshow('Video', frame_detected)
|
||||
sleep(0.01)
|
||||
|
||||
v_output.write(frame_detected)
|
||||
if write_output:
|
||||
v_output.write(frame_detected)
|
||||
if cv2.waitKey(1) & 0xFF == ord('q'):
|
||||
break
|
||||
|
||||
# Release capture/writer handles and close the preview window.
v_input.release()
# Fix: removed the residual unconditional v_output.release(); the writer only
# exists when write_output is True, so release it under the same guard.
if write_output:
    v_output.release()
cv2.destroyAllWindows()
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import yaml
|
||||
|
||||
from predictor import Predictor
|
||||
from ultralytics import YOLO
|
||||
|
||||
class YOLOPredictor(Predictor):
    """Legacy ONNX-backed predictor (superseded by predictor.OnnxPredictor).

    Loads the ONNX export from an absolute deployment path and the class
    names from 'data.yaml'.
    """

    def __init__(self):
        self.model = YOLO('/azaion/models/azaion.onnx')
        # ONNX exports do not embed the task type; force detection.
        self.model.task = 'detect'
        # Fix: the original parsed data.yaml into locals (`class_names`,
        # `names`) that were immediately discarded. Keep the names on the
        # instance so callers can map class ids to labels.
        with open('data.yaml', 'r') as f:
            data_yaml = yaml.safe_load(f)
        self.class_names = data_yaml['names']

    def predict(self, frame):
        """Track objects in *frame* with ByteTrack and return the plotted frame."""
        results = self.model.track(frame, persist=True, tracker='bytetrack.yaml')
        return results[0].plot()
|
||||
Reference in New Issue
Block a user