fix loader bug with _CACHED_HW_INFO

Put the tile size into the annotation name and set it dynamically for AI recognition
This commit is contained in:
Oleksandr Bezdieniezhnykh
2025-09-02 13:59:23 +03:00
parent 067f02cc63
commit b6b6751c37
19 changed files with 83 additions and 104 deletions
+4 -2
View File
@@ -504,11 +504,13 @@ public partial class Annotator
if (files.Count == 0) if (files.Count == 0)
return; return;
await _inferenceService.RunInference(files, DetectionCancellationSource.Token); //TODO: Get Tile Size from UI based on height setup
var tileSize = 550;
await _inferenceService.RunInference(files, tileSize, DetectionCancellationSource.Token);
LvFiles.Items.Refresh(); LvFiles.Items.Refresh();
_isInferenceNow = false; _isInferenceNow = false;
StatusHelp.Text = "Розпізнавання зваершено"; StatusHelp.Text = "Розпізнавання завершено";
AIDetectBtn.IsEnabled = true; AIDetectBtn.IsEnabled = true;
} }
+1 -1
View File
@@ -327,7 +327,7 @@ public class AnnotatorEventHandler(
foreach (var res in results) foreach (var res in results)
{ {
var time = TimeSpan.Zero; var time = TimeSpan.Zero;
var annotationName = $"{formState.MediaName}{Constants.SPLIT_SUFFIX}{res.Tile.Left:0000}_{res.Tile.Top:0000}!".ToTimeName(time); var annotationName = $"{formState.MediaName}{Constants.SPLIT_SUFFIX}{res.Tile.Width}{res.Tile.Left:0000}_{res.Tile.Top:0000}!".ToTimeName(time);
var tileImgPath = Path.Combine(dirConfig.Value.ImagesDirectory, $"{annotationName}{Constants.JPG_EXT}"); var tileImgPath = Path.Combine(dirConfig.Value.ImagesDirectory, $"{annotationName}{Constants.JPG_EXT}");
var bitmap = new CroppedBitmap(source, new Int32Rect((int)res.Tile.Left, (int)res.Tile.Top, (int)res.Tile.Width, (int)res.Tile.Height)); var bitmap = new CroppedBitmap(source, new Int32Rect((int)res.Tile.Left, (int)res.Tile.Top, (int)res.Tile.Width, (int)res.Tile.Height));
@@ -1,8 +0,0 @@
using System.Drawing;
namespace Azaion.Annotator.Extensions;
public static class RectangleFExtensions
{
public static double Area(this RectangleF rectangle) => rectangle.Width * rectangle.Height;
}
@@ -1,14 +0,0 @@
using System.ComponentModel;
namespace Azaion.Annotator;
public static class SynchronizeInvokeExtensions
{
public static void InvokeEx<T>(this T t, Action<T> action) where T : ISynchronizeInvoke
{
if (t.InvokeRequired)
t.Invoke(action, [t]);
else
action(t);
}
}
+1 -1
View File
@@ -16,7 +16,7 @@ public static class Constants
public const string DEFAULT_API_URL = "https://api.azaion.com"; public const string DEFAULT_API_URL = "https://api.azaion.com";
public const string AZAION_SUITE_EXE = "Azaion.Suite.exe"; public const string AZAION_SUITE_EXE = "Azaion.Suite.exe";
public const int AI_TILE_SIZE = 1280; public const int AI_TILE_SIZE_DEFAULT = 1280;
#region ExternalClientsConfig #region ExternalClientsConfig
+2 -2
View File
@@ -482,8 +482,8 @@ public class CanvasEditor : Canvas
canvasLabel = new CanvasLabel(detection, RenderSize, mediaSize, detection.Confidence); canvasLabel = new CanvasLabel(detection, RenderSize, mediaSize, detection.Confidence);
else else
{ {
canvasLabel = new CanvasLabel(detection, new Size(Constants.AI_TILE_SIZE, Constants.AI_TILE_SIZE), null, detection.Confidence) canvasLabel = new CanvasLabel(detection, annotation.SplitTile!.Size, null, detection.Confidence)
.ReframeFromSmall(annotation.SplitTile!); .ReframeFromSmall(annotation.SplitTile);
//From CurrentMediaSize to Render Size //From CurrentMediaSize to Render Size
var yoloLabel = new YoloLabel(canvasLabel, mediaSize); var yoloLabel = new YoloLabel(canvasLabel, mediaSize);
+5 -5
View File
@@ -59,13 +59,13 @@ public class Annotation
return _splitTile; return _splitTile;
var startCoordIndex = Name.IndexOf(Constants.SPLIT_SUFFIX, StringComparison.Ordinal) + Constants.SPLIT_SUFFIX.Length; var startCoordIndex = Name.IndexOf(Constants.SPLIT_SUFFIX, StringComparison.Ordinal) + Constants.SPLIT_SUFFIX.Length;
var coordsStr = Name.Substring(startCoordIndex, 9).Split('_'); var coordsStr = Name.Substring(startCoordIndex, 14).Split('_');
_splitTile = new CanvasLabel _splitTile = new CanvasLabel
{ {
Left = double.Parse(coordsStr[0]), Left = double.Parse(coordsStr[1]),
Top = double.Parse(coordsStr[1]), Top = double.Parse(coordsStr[2]),
Width = Constants.AI_TILE_SIZE, Width = double.Parse(coordsStr[0]),
Height = Constants.AI_TILE_SIZE Height = double.Parse(coordsStr[0])
}; };
return _splitTile; return _splitTile;
} }
+1 -1
View File
@@ -6,5 +6,5 @@ public static class SizeExtensions
{ {
public static bool FitSizeForAI(this Size size) => public static bool FitSizeForAI(this Size size) =>
// Allow to be up to FullHD to save as 1280*1280 // Allow to be up to FullHD to save as 1280*1280
size.Width <= Constants.AI_TILE_SIZE * 1.5 && size.Height <= Constants.AI_TILE_SIZE * 1.5; size.Width <= Constants.AI_TILE_SIZE_DEFAULT * 1.5 && size.Height <= Constants.AI_TILE_SIZE_DEFAULT * 1.5;
} }
@@ -7,50 +7,44 @@ namespace Azaion.Common.Services.Inference;
public interface IInferenceService public interface IInferenceService
{ {
Task RunInference(List<string> mediaPaths, CancellationToken ct = default); Task RunInference(List<string> mediaPaths, int tileSize, CancellationToken ct = default);
CancellationTokenSource InferenceCancelTokenSource { get; set; } CancellationTokenSource InferenceCancelTokenSource { get; set; }
void StopInference(); void StopInference();
} }
// SHOULD BE ONLY ONE INSTANCE OF InferenceService. Do not add ANY NotificationHandler to it! // SHOULD BE ONLY ONE INSTANCE OF InferenceService. Do not add ANY NotificationHandler to it!
// _inferenceCancelTokenSource should be created only once. // _inferenceCancelTokenSource should be created only once.
public class InferenceService : IInferenceService public class InferenceService(
IInferenceClient client,
IAzaionApi azaionApi,
IOptions<AIRecognitionConfig> aiConfigOptions) : IInferenceService
{ {
private readonly IInferenceClient _client;
private readonly IAzaionApi _azaionApi;
private readonly IOptions<AIRecognitionConfig> _aiConfigOptions;
public CancellationTokenSource InferenceCancelTokenSource { get; set; } = new(); public CancellationTokenSource InferenceCancelTokenSource { get; set; } = new();
public CancellationTokenSource CheckAIAvailabilityTokenSource { get; set; } = new(); public CancellationTokenSource CheckAIAvailabilityTokenSource { get; set; } = new();
public InferenceService(IInferenceClient client, IAzaionApi azaionApi, IOptions<AIRecognitionConfig> aiConfigOptions)
{
_client = client;
_azaionApi = azaionApi;
_aiConfigOptions = aiConfigOptions;
}
public async Task CheckAIAvailabilityStatus() public async Task CheckAIAvailabilityStatus()
{ {
CheckAIAvailabilityTokenSource = new CancellationTokenSource(); CheckAIAvailabilityTokenSource = new CancellationTokenSource();
while (!CheckAIAvailabilityTokenSource.IsCancellationRequested) while (!CheckAIAvailabilityTokenSource.IsCancellationRequested)
{ {
_client.Send(RemoteCommand.Create(CommandType.AIAvailabilityCheck)); client.Send(RemoteCommand.Create(CommandType.AIAvailabilityCheck));
await Task.Delay(10000, CheckAIAvailabilityTokenSource.Token); await Task.Delay(10000, CheckAIAvailabilityTokenSource.Token);
} }
} }
public async Task RunInference(List<string> mediaPaths, CancellationToken ct = default) public async Task RunInference(List<string> mediaPaths, int tileSize, CancellationToken ct = default)
{ {
InferenceCancelTokenSource = new CancellationTokenSource(); InferenceCancelTokenSource = new CancellationTokenSource();
_client.Send(RemoteCommand.Create(CommandType.Login, _azaionApi.Credentials)); client.Send(RemoteCommand.Create(CommandType.Login, azaionApi.Credentials));
var aiConfig = _aiConfigOptions.Value; var aiConfig = aiConfigOptions.Value;
aiConfig.Paths = mediaPaths; aiConfig.Paths = mediaPaths;
_client.Send(RemoteCommand.Create(CommandType.Inference, aiConfig)); aiConfig.TileSize = tileSize;
client.Send(RemoteCommand.Create(CommandType.Inference, aiConfig));
using var combinedTokenSource = CancellationTokenSource.CreateLinkedTokenSource(ct, InferenceCancelTokenSource.Token); using var combinedTokenSource = CancellationTokenSource.CreateLinkedTokenSource(ct, InferenceCancelTokenSource.Token);
await combinedTokenSource.Token.AsTask(); await combinedTokenSource.Token.AsTask();
} }
public void StopInference() => _client.Stop(); public void StopInference() => client.Stop();
} }
+4 -10
View File
@@ -4,16 +4,10 @@ using Azaion.Common.DTO;
namespace Azaion.Common.Services; namespace Azaion.Common.Services;
public class TileResult public class TileResult(CanvasLabel tile, List<CanvasLabel> detections)
{ {
public CanvasLabel Tile { get; set; } public CanvasLabel Tile { get; set; } = tile;
public List<CanvasLabel> Detections { get; set; } public List<CanvasLabel> Detections { get; set; } = detections;
public TileResult(CanvasLabel tile, List<CanvasLabel> detections)
{
Tile = tile;
Detections = detections;
}
} }
public static class TileProcessor public static class TileProcessor
@@ -41,7 +35,7 @@ public static class TileProcessor
private static TileResult GetDetectionsInTile(Size originalSize, CanvasLabel startDet, List<CanvasLabel> allDetections) private static TileResult GetDetectionsInTile(Size originalSize, CanvasLabel startDet, List<CanvasLabel> allDetections)
{ {
var tile = new CanvasLabel(startDet.Left, startDet.Right, startDet.Top, startDet.Bottom); var tile = new CanvasLabel(startDet.Left, startDet.Right, startDet.Top, startDet.Bottom);
var maxSize = new List<double> { startDet.Width + BORDER, startDet.Height + BORDER, Constants.AI_TILE_SIZE }.Max(); var maxSize = new List<double> { startDet.Width + BORDER, startDet.Height + BORDER, Constants.AI_TILE_SIZE_DEFAULT }.Max();
var selectedDetections = new List<CanvasLabel>{startDet}; var selectedDetections = new List<CanvasLabel>{startDet};
foreach (var det in allDetections) foreach (var det in allDetections)
+2
View File
@@ -1,4 +1,5 @@
cdef class AIRecognitionConfig: cdef class AIRecognitionConfig:
cdef public double frame_recognition_seconds cdef public double frame_recognition_seconds
cdef public int frame_period_recognition cdef public int frame_period_recognition
cdef public double probability_threshold cdef public double probability_threshold
@@ -8,6 +9,7 @@ cdef class AIRecognitionConfig:
cdef public double tracking_intersection_threshold cdef public double tracking_intersection_threshold
cdef public int big_image_tile_overlap_percent cdef public int big_image_tile_overlap_percent
cdef public int tile_size
cdef public bytes file_data cdef public bytes file_data
cdef public list[str] paths cdef public list[str] paths
+11 -5
View File
@@ -9,11 +9,13 @@ cdef class AIRecognitionConfig:
tracking_distance_confidence, tracking_distance_confidence,
tracking_probability_increase, tracking_probability_increase,
tracking_intersection_threshold, tracking_intersection_threshold,
big_image_tile_overlap_percent,
file_data, file_data,
paths, paths,
model_batch_size model_batch_size,
big_image_tile_overlap_percent,
tile_size
): ):
self.frame_period_recognition = frame_period_recognition self.frame_period_recognition = frame_period_recognition
self.frame_recognition_seconds = frame_recognition_seconds self.frame_recognition_seconds = frame_recognition_seconds
@@ -22,12 +24,14 @@ cdef class AIRecognitionConfig:
self.tracking_distance_confidence = tracking_distance_confidence self.tracking_distance_confidence = tracking_distance_confidence
self.tracking_probability_increase = tracking_probability_increase self.tracking_probability_increase = tracking_probability_increase
self.tracking_intersection_threshold = tracking_intersection_threshold self.tracking_intersection_threshold = tracking_intersection_threshold
self.big_image_tile_overlap_percent = big_image_tile_overlap_percent
self.file_data = file_data self.file_data = file_data
self.paths = paths self.paths = paths
self.model_batch_size = model_batch_size self.model_batch_size = model_batch_size
self.big_image_tile_overlap_percent = big_image_tile_overlap_percent
self.tile_size = tile_size
def __str__(self): def __str__(self):
return (f'frame_seconds : {self.frame_recognition_seconds}, distance_confidence : {self.tracking_distance_confidence}, ' return (f'frame_seconds : {self.frame_recognition_seconds}, distance_confidence : {self.tracking_distance_confidence}, '
f'probability_increase : {self.tracking_probability_increase}, ' f'probability_increase : {self.tracking_probability_increase}, '
@@ -48,9 +52,11 @@ cdef class AIRecognitionConfig:
unpacked.get("t_dc", 0.0), unpacked.get("t_dc", 0.0),
unpacked.get("t_pi", 0.0), unpacked.get("t_pi", 0.0),
unpacked.get("t_it", 0.0), unpacked.get("t_it", 0.0),
unpacked.get("ov_p", 20),
unpacked.get("d", b''), unpacked.get("d", b''),
unpacked.get("p", []), unpacked.get("p", []),
unpacked.get("m_bs") unpacked.get("m_bs"),
unpacked.get("ov_p", 20),
unpacked.get("tile_size", 550),
) )
+1 -3
View File
@@ -18,8 +18,6 @@ cdef class Inference:
cdef str model_input cdef str model_input
cdef int model_width cdef int model_width
cdef int model_height cdef int model_height
cdef int tile_width
cdef int tile_height
cdef bytes get_onnx_engine_bytes(self) cdef bytes get_onnx_engine_bytes(self)
cdef init_ai(self) cdef init_ai(self)
@@ -30,7 +28,7 @@ cdef class Inference:
cdef _process_video(self, RemoteCommand cmd, AIRecognitionConfig ai_config, str video_name) cdef _process_video(self, RemoteCommand cmd, AIRecognitionConfig ai_config, str video_name)
cdef _process_images(self, RemoteCommand cmd, AIRecognitionConfig ai_config, list[str] image_paths) cdef _process_images(self, RemoteCommand cmd, AIRecognitionConfig ai_config, list[str] image_paths)
cdef _process_images_inner(self, RemoteCommand cmd, AIRecognitionConfig ai_config, list frame_data) cdef _process_images_inner(self, RemoteCommand cmd, AIRecognitionConfig ai_config, list frame_data)
cdef split_to_tiles(self, frame, path, overlap_percent) cdef split_to_tiles(self, frame, path, tile_size, overlap_percent)
cdef stop(self) cdef stop(self)
cdef preprocess(self, frames) cdef preprocess(self, frames)
+18 -23
View File
@@ -58,8 +58,6 @@ cdef class Inference:
self.model_input = None self.model_input = None
self.model_width = 0 self.model_width = 0
self.model_height = 0 self.model_height = 0
self.tile_width = 0
self.tile_height = 0
self.engine = None self.engine = None
self.is_building_engine = False self.is_building_engine = False
self.ai_availability_status = AIAvailabilityStatus() self.ai_availability_status = AIAvailabilityStatus()
@@ -107,15 +105,11 @@ cdef class Inference:
self.is_building_engine = False self.is_building_engine = False
self.model_height, self.model_width = self.engine.get_input_shape() self.model_height, self.model_width = self.engine.get_input_shape()
#todo: temporarily, send it from the client
self.tile_width = 550
self.tile_height = 550
except Exception as e: except Exception as e:
self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str>str(e)) self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str>str(e))
self.is_building_engine = False self.is_building_engine = False
cdef preprocess(self, frames): cdef preprocess(self, frames):
blobs = [cv2.dnn.blobFromImage(frame, blobs = [cv2.dnn.blobFromImage(frame,
scalefactor=1.0 / 255.0, scalefactor=1.0 / 255.0,
@@ -277,7 +271,7 @@ cdef class Inference:
if img_h <= 1.5 * self.model_height and img_w <= 1.5 * self.model_width: if img_h <= 1.5 * self.model_height and img_w <= 1.5 * self.model_width:
frame_data.append((frame, original_media_name, f'{original_media_name}_000000')) frame_data.append((frame, original_media_name, f'{original_media_name}_000000'))
else: else:
res = self.split_to_tiles(frame, path, ai_config.big_image_tile_overlap_percent) res = self.split_to_tiles(frame, path, ai_config.tile_size, ai_config.big_image_tile_overlap_percent)
frame_data.extend(res) frame_data.extend(res)
if len(frame_data) > self.engine.get_batch_size(): if len(frame_data) > self.engine.get_batch_size():
for chunk in self.split_list_extend(frame_data, self.engine.get_batch_size()): for chunk in self.split_list_extend(frame_data, self.engine.get_batch_size()):
@@ -287,31 +281,31 @@ cdef class Inference:
self._process_images_inner(cmd, ai_config, chunk) self._process_images_inner(cmd, ai_config, chunk)
cdef split_to_tiles(self, frame, path, overlap_percent): cdef split_to_tiles(self, frame, path, tile_size, overlap_percent):
constants_inf.log(<str>f'splitting image {path} to tiles...') constants_inf.log(<str>f'splitting image {path} to tiles...')
img_h, img_w, _ = frame.shape img_h, img_w, _ = frame.shape
stride_w = int(self.tile_width * (1 - overlap_percent / 100)) stride_w = int(tile_size * (1 - overlap_percent / 100))
stride_h = int(self.tile_height * (1 - overlap_percent / 100)) stride_h = int(tile_size * (1 - overlap_percent / 100))
results = [] results = []
original_media_name = Path(<str> path).stem.replace(" ", "") original_media_name = Path(<str> path).stem.replace(" ", "")
for y in range(0, img_h, stride_h): for y in range(0, img_h, stride_h):
for x in range(0, img_w, stride_w): for x in range(0, img_w, stride_w):
x_end = min(x + self.tile_width, img_w) x_end = min(x + tile_size, img_w)
y_end = min(y + self.tile_height, img_h) y_end = min(y + tile_size, img_h)
# correct x,y for the close-to-border tiles # correct x,y for the close-to-border tiles
if x_end - x < self.tile_width: if x_end - x < tile_size:
if img_w - (x - stride_w) <= self.tile_width: if img_w - (x - stride_w) <= tile_size:
continue # the previous tile already covered the last gap continue # the previous tile already covered the last gap
x = img_w - self.tile_width x = img_w - tile_size
if y_end - y < self.tile_height: if y_end - y < tile_size:
if img_h - (y - stride_h) <= self.tile_height: if img_h - (y - stride_h) <= tile_size:
continue # the previous tile already covered the last gap continue # the previous tile already covered the last gap
y = img_h - self.tile_height y = img_h - tile_size
tile = frame[y:y_end, x:x_end] tile = frame[y:y_end, x:x_end]
name = f'{original_media_name}{constants_inf.SPLIT_SUFFIX}{x:04d}_{y:04d}!_000000' name = f'{original_media_name}{constants_inf.SPLIT_SUFFIX}{tile_size:04d}{x:04d}_{y:04d}!_000000'
results.append((tile, original_media_name, name)) results.append((tile, original_media_name, name))
return results return results
@@ -337,14 +331,15 @@ cdef class Inference:
cdef remove_tiled_duplicates(self, Annotation annotation): cdef remove_tiled_duplicates(self, Annotation annotation):
right = annotation.name.rindex('!') right = annotation.name.rindex('!')
left = annotation.name.index(constants_inf.SPLIT_SUFFIX) + len(constants_inf.SPLIT_SUFFIX) left = annotation.name.index(constants_inf.SPLIT_SUFFIX) + len(constants_inf.SPLIT_SUFFIX)
x_str, y_str = annotation.name[left:right].split('_') tile_size_str, x_str, y_str = annotation.name[left:right].split('_')
tile_size = int(tile_size_str)
x = int(x_str) x = int(x_str)
y = int(y_str) y = int(y_str)
for det in annotation.detections: for det in annotation.detections:
x1 = det.x * self.tile_width x1 = det.x * tile_size
y1 = det.y * self.tile_height y1 = det.y * tile_size
det_abs = Detection(x + x1, y + y1, det.w * self.tile_width, det.h * self.tile_height, det.cls, det.confidence) det_abs = Detection(x + x1, y + y1, det.w * tile_size, det.h * tile_size, det.cls, det.confidence)
detections = self._tile_detections.setdefault(annotation.original_media_name, []) detections = self._tile_detections.setdefault(annotation.original_media_name, [])
if det_abs in detections: if det_abs in detections:
annotation.detections.remove(det) annotation.detections.remove(det)
+1 -1
View File
@@ -37,7 +37,7 @@ cdef class CommandProcessor:
continue continue
except Exception as e: except Exception as e:
traceback.print_exc() traceback.print_exc()
constants_inf.log('EXIT!') constants_inf.log(<str>'EXIT!')
cdef on_command(self, RemoteCommand command): cdef on_command(self, RemoteCommand command):
try: try:
+4
View File
@@ -19,6 +19,10 @@ venv\Scripts\pip install -r requirements.txt
venv\Scripts\pip install --upgrade pyinstaller pyinstaller-hooks-contrib venv\Scripts\pip install --upgrade pyinstaller pyinstaller-hooks-contrib
venv\Scripts\python setup.py build_ext --inplace venv\Scripts\python setup.py build_ext --inplace
if %errorlevel% neq 0 (
echo "Error building cython extension"
exit /b %errorlevel%
)
echo install azaion-loader echo install azaion-loader
venv\Scripts\pyinstaller --name=azaion-loader ^ venv\Scripts\pyinstaller --name=azaion-loader ^
+2
View File
@@ -1,3 +1,5 @@
cdef str _CACHED_HW_INFO
cdef class HardwareService: cdef class HardwareService:
@staticmethod @staticmethod
+5 -1
View File
@@ -1,13 +1,17 @@
import os import os
import subprocess import subprocess
cimport constants cimport constants
cdef class HardwareService:
cdef str _CACHED_HW_INFO = None cdef str _CACHED_HW_INFO = None
cdef class HardwareService:
@staticmethod @staticmethod
cdef str get_hardware_info(): cdef str get_hardware_info():
global _CACHED_HW_INFO global _CACHED_HW_INFO
if _CACHED_HW_INFO is not None: if _CACHED_HW_INFO is not None:
constants.log(<str>"Using cached hardware info")
return <str> _CACHED_HW_INFO return <str> _CACHED_HW_INFO
if os.name == 'nt': # windows if os.name == 'nt': # windows
+7 -7
View File
@@ -57,7 +57,7 @@ namespace Azaion.Annotator.Test;
var detections = new List<CanvasLabel> var detections = new List<CanvasLabel>
{ {
new(100, 150, 100, 150), new(100, 150, 100, 150),
new(2000, 2050, 2000, 2050) // More than Constants.AI_TILE_SIZE away new(2000, 2050, 2000, 2050) // More than Constants.AI_TILE_SIZE_DEFAULT away
}; };
// Act // Act
@@ -139,11 +139,11 @@ namespace Azaion.Annotator.Test;
{ {
// Arrange // Arrange
var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE); var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE);
// Combined width is 1270. 1270 + BORDER (10) is not > Constants.AI_TILE_SIZE (1280), so they fit. // Combined width is 1270. 1270 + BORDER (10) is not > Constants.AI_TILE_SIZE_DEFAULT (1280), so they fit.
var detections = new List<CanvasLabel> var detections = new List<CanvasLabel>
{ {
new(0, 50, 0, 50), new(0, 50, 0, 50),
new(Constants.AI_TILE_SIZE - TileProcessor.BORDER - 50, Constants.AI_TILE_SIZE - TileProcessor.BORDER, 0, 50) new(Constants.AI_TILE_SIZE_DEFAULT - TileProcessor.BORDER - 50, Constants.AI_TILE_SIZE_DEFAULT - TileProcessor.BORDER, 0, 50)
}; };
// Act // Act
@@ -159,11 +159,11 @@ namespace Azaion.Annotator.Test;
{ {
// Arrange // Arrange
var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE); var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE);
// Combined width is 1271. 1271 + BORDER (10) is > Constants.AI_TILE_SIZE (1280), so they don't fit. // Combined width is 1271. 1271 + BORDER (10) is > Constants.AI_TILE_SIZE_DEFAULT (1280), so they don't fit.
var detections = new List<CanvasLabel> var detections = new List<CanvasLabel>
{ {
new(0, 50, 1000, 1050), // Top-most new(0, 50, 1000, 1050), // Top-most
new(Constants.AI_TILE_SIZE - TileProcessor.BORDER - 49, Constants.AI_TILE_SIZE - TileProcessor.BORDER + 1, 0, 50) new(Constants.AI_TILE_SIZE_DEFAULT - TileProcessor.BORDER - 49, Constants.AI_TILE_SIZE_DEFAULT - TileProcessor.BORDER + 1, 0, 50)
}; };
// Act // Act
@@ -224,7 +224,7 @@ namespace Azaion.Annotator.Test;
{ {
// Arrange // Arrange
var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE); var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE);
var largeDetection = new CanvasLabel(100, 100 + Constants.AI_TILE_SIZE + 100, 100, 200); var largeDetection = new CanvasLabel(100, 100 + Constants.AI_TILE_SIZE_DEFAULT + 100, 100, 200);
var detections = new List<CanvasLabel> { largeDetection }; var detections = new List<CanvasLabel> { largeDetection };
// Act // Act
@@ -245,7 +245,7 @@ namespace Azaion.Annotator.Test;
{ {
// Arrange // Arrange
var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE); var originalSize = new Size(IMAGE_SIZE, IMAGE_SIZE);
var largeTallDetection = new CanvasLabel(100, 150, 100, 100 + Constants.AI_TILE_SIZE + 200); var largeTallDetection = new CanvasLabel(100, 150, 100, 100 + Constants.AI_TILE_SIZE_DEFAULT + 200);
var smallDetectionNearby = new CanvasLabel(largeTallDetection.Right + 15, largeTallDetection.Right + 35, 700, 720); var smallDetectionNearby = new CanvasLabel(largeTallDetection.Right + 15, largeTallDetection.Right + 35, 700, 720);
var detections = new List<CanvasLabel> { largeTallDetection, smallDetectionNearby }; var detections = new List<CanvasLabel> { largeTallDetection, smallDetectionNearby };