mirror of
https://github.com/azaion/ai-training.git
synced 2026-04-22 14:36:37 +00:00
Add RKNN conversion: install and usage scripts; automatically convert the model to RKNN after AI training completes, and place the .pt and .rknn models in the /azaion/models directory
This commit is contained in:
@@ -5,14 +5,16 @@ from ultralytics import YOLO
|
||||
import cv2
|
||||
from time import sleep
|
||||
|
||||
model = YOLO('azaion-2024-08-13.pt')
|
||||
from yolo_predictor import YOLOPredictor
|
||||
|
||||
# video_url = 'https://www.youtube.com/watch?v=d1n2fDOSo8c'
|
||||
# stream = CamGear(source=video_url, stream_mode=True, logging=True).start()
|
||||
|
||||
predictor = YOLOPredictor()
|
||||
|
||||
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
|
||||
|
||||
input_name = sys.argv[1]
|
||||
input_name = 'ForAI.mp4'
|
||||
output_name = Path(input_name).stem + '_recognised.mp4'
|
||||
|
||||
v_input = cv2.VideoCapture(input_name)
|
||||
@@ -23,9 +25,7 @@ while v_input.isOpened():
|
||||
if frame is None:
|
||||
break
|
||||
|
||||
results = model.track(frame, persist=True, tracker='bytetrack.yaml')
|
||||
frame_detected = results[0].plot()
|
||||
|
||||
frame_detected = predictor.predict(frame)
|
||||
frame_detected = cv2.resize(frame_detected, (640, 480))
|
||||
cv2.imshow('Video', frame_detected)
|
||||
sleep(0.01)
|
||||
|
||||
Reference in New Issue
Block a user