# mirror of https://github.com/azaion/ai-training.git
# synced 2026-04-22 10:16:34 +00:00
import os.path
|
|
import albumentations as A
|
|
import cv2
|
|
from pathlib import Path
|
|
|
|
# Sub-directory names inside the dataset tree (standard YOLO layout).
labels_dir = 'labels'
images_dir = 'images'
# Root of the dataset currently being augmented.
current_dataset_dir = os.path.join('datasets', 'zombobase-current')
|
|
class ImageAnnotation:
    """An image plus its YOLO-format label file from the current dataset.

    Attributes:
        image_path: the bare stem string the caller supplied (no extension).
        image_name: Path stem of image_path.
        dataset_image_path: full path to the .jpg inside the dataset tree.
        image: the loaded image array (NOTE: cv2.imread returns None on
            failure instead of raising -- callers should check).
        annotation_path: label path relative to the labels directory.
        dataset_annotation_path: full path to the .txt label file.
        annotations: parsed label rows, see read_annotations().
    """

    def __init__(self, image_path):
        # image_path is expected to be a bare stem such as 'frame_0001'
        # (no directory, no extension) -- TODO confirm against callers.
        self.image_path = image_path
        self.image_name = Path(image_path).stem
        self.dataset_image_path = os.path.join(current_dataset_dir, images_dir, self.image_path + '.jpg')
        self.image = cv2.imread(self.dataset_image_path)
        self.annotation_path = os.path.join(labels_dir, self.image_path + '.txt')
        self.dataset_annotation_path = os.path.join(current_dataset_dir, labels_dir, self.image_path + '.txt')
        self.annotations = self.read_annotations()

    def read_annotations(self) -> list[list]:
        """Parse the YOLO label file into [x, y, w, h, class_id] rows.

        Each file line is '<class> <x> <y> <w> <h>'. The class id is moved
        to the END of the row (kept as a string); coordinates become floats.

        Returns:
            A list of parsed rows; an empty list for an empty file.

        Raises:
            FileNotFoundError: if the label file does not exist.
            ValueError: if a coordinate field is not a valid float.
        """
        with open(self.dataset_annotation_path, 'r') as f:
            rows = f.readlines()
        arr = []
        for row in rows:
            # split() with no argument tolerates repeated spaces and strips
            # the trailing newline that split(' ') left on the last field.
            fields = row.split()
            if not fields:
                # Skip blank lines instead of crashing on float('').
                continue
            class_num = fields.pop(0)
            coordinates = [float(n) for n in fields]
            coordinates.append(class_num)
            arr.append(coordinates)
        return arr
|
|
|
|
|
|
def image_processing(img_ann) -> tuple:
    """Apply one random augmentation pass to an image and its bboxes.

    Args:
        img_ann: either an ImageAnnotation, or (as the current callers do)
            the bare image stem string used to construct one.

    Returns:
        A 3-tuple (transformed_image, transformed_bboxes,
        transformed_category_ids). The original annotation of
        ``-> [ImageAnnotation]`` was wrong -- this never returned that.
    """
    # Build the annotation object once; the original constructed it twice,
    # re-reading both the image and the label file from disk each time.
    ann = img_ann if isinstance(img_ann, ImageAnnotation) else ImageAnnotation(img_ann)

    # Split rows [x, y, w, h, class] into YOLO bboxes and their labels
    # without mutating the cached annotation rows in place.
    bboxes = []
    category_ids = []
    for row in ann.annotations:
        bboxes.append(row[:4])
        category_ids.append(row[4])

    transform = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.RandomBrightnessContrast(p=0.2),
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=15, p=0.5),
    ], bbox_params=A.BboxParams(format='yolo', label_fields=['category_ids']))

    transformed = transform(image=ann.image, bboxes=bboxes, category_ids=category_ids)
    return transformed['image'], transformed['bboxes'], transformed['category_ids']
|
|
|
|
def write_results(img_ann):
    """Generate and save 10 augmented copies of one image and its labels.

    Args:
        img_ann: the image stem string; passed through to image_processing()
            and used to name the output files.

    Side effects:
        Writes <i><stem>.jpg and <i><stem>.txt for i in 0..9 into the
        current dataset's images/ and labels/ directories.
    """
    # Resolve the stem once up front; the original rebuilt a full
    # ImageAnnotation (two disk reads) TWICE per loop iteration just to
    # read its .image_path attribute back.
    stem = ImageAnnotation(img_ann).image_path
    for i in range(10):
        transformed_image, transformed_bboxes, transformed_category_ids = image_processing(img_ann)

        image_out = os.path.join(current_dataset_dir, images_dir, str(i) + stem + '.jpg')
        cv2.imwrite(image_out, transformed_image)

        label_out = os.path.join(current_dataset_dir, labels_dir, str(i) + stem + '.txt')
        with open(label_out, 'w') as f:
            # Write back in YOLO layout: class first, then coordinates.
            for bbox, category_id in zip(transformed_bboxes, transformed_category_ids):
                x_center, y_center, width, height = bbox
                f.write(f"{category_id} {x_center} {y_center} {width} {height}\n")
|
|
#
|
|
#
|
|
def process_image(images_root=None):
    """Collect the file-name stems of every image in the dataset images dir.

    Args:
        images_root: directory to scan; defaults to the current dataset's
            images directory (backward-compatible with the old no-arg call).

    Returns:
        List of file names with their final extension stripped; an empty
        list when the directory is missing or empty (the original raised
        IndexError on a missing directory).
    """
    if images_root is None:
        images_root = os.path.join(current_dataset_dir, images_dir)
    stems = []
    for _dirpath, _subdirs, filenames in os.walk(images_root):
        for filename in filenames:
            # splitext strips exactly one trailing extension and leaves
            # extension-less names intact, unlike the old split('.')/pop()
            # approach which reduced 'name' to ''.
            stems.append(os.path.splitext(filename)[0])
        # Only the top-level directory matters: the original appended every
        # walk level but then read file_annotation[0] exclusively.
        break
    return stems
|
|
|
|
def main():
    """Augment every image found in the current dataset."""
    # The original called process_image() twice and discarded the first
    # result, scanning the images directory a second time for no reason.
    for stem in process_image():
        write_results(stem)


if __name__ == '__main__':
    main()