add Warning AI status, fix AI availability handling

Oleksandr Bezdieniezhnykh
2025-09-03 16:36:26 +03:00
parent a7a99c49c0
commit b3665630ed
6 changed files with 31 additions and 12 deletions
@@ -496,6 +496,7 @@
 <Button
     x:Name="AIDetectBtn"
+    IsEnabled="False"
     Grid.Column="10"
     Padding="2" Width="25"
     Height="25"
@@ -483,5 +483,7 @@ public class AnnotatorEventHandler(
             mainWindow.AIDetectBtn.IsEnabled = e.Status == AIAvailabilityEnum.Enabled;
             mainWindow.StatusHelp.Text = e.ToString();
         });
+        if (e.Status is AIAvailabilityEnum.Enabled or AIAvailabilityEnum.Error)
+            await inferenceService.CheckAIAvailabilityTokenSource.CancelAsync();
     }
 }
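Only the terminal states stop the background poll: Enabled and Error cancel CheckAIAvailabilityTokenSource, while the new Warning status deliberately leaves the loop running, since the service is still converting the model and a later status event is expected.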
@@ -10,6 +10,7 @@ public enum AIAvailabilityEnum
     Converting = 20,
     Uploading = 30,
     Enabled = 200,
+    Warning = 300,
     Error = 500
 }
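The numeric values appear to follow HTTP-style bands (200 success, 300 warning, 500 error), which is presumably why Warning slots in at 300 between Enabled and Error.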
@@ -27,6 +28,7 @@ public class AIAvailabilityStatusEvent : INotification
     { AIAvailabilityEnum.Converting, "Йде налаштування AI під Ваше залізо. (5-12 хвилин в залежності від моделі відеокарти, до 50 хв на старих GTX1650)" },
     { AIAvailabilityEnum.Uploading, "Йде зберігання AI" },
     { AIAvailabilityEnum.Enabled, "AI готовий для розпізнавання" },
+    { AIAvailabilityEnum.Warning, "Неможливо запустити AI наразі, йде налаштування під Ваше залізо" },
     { AIAvailabilityEnum.Error, "Помилка під час налаштування AI" }
 };
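In English, the Ukrainian status messages read: Converting, "AI is being tuned for your hardware. (5-12 minutes depending on the GPU model, up to 50 minutes on older GTX 1650 cards)"; Uploading, "AI is being saved"; Enabled, "AI is ready for recognition"; Warning (new), "AI cannot be started right now; it is being tuned for your hardware"; Error, "Error while setting up AI".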
@@ -9,25 +9,37 @@ public interface IInferenceService
 {
     Task RunInference(List<string> mediaPaths, int tileSize, CancellationToken ct = default);
     CancellationTokenSource InferenceCancelTokenSource { get; set; }
+    CancellationTokenSource CheckAIAvailabilityTokenSource { get; set; }
     void StopInference();
 }
 
 // SHOULD BE ONLY ONE INSTANCE OF InferenceService. Do not add ANY NotificationHandler to it!
 // _inferenceCancelTokenSource should be created only once.
-public class InferenceService(
-    IInferenceClient client,
-    IAzaionApi azaionApi,
-    IOptions<AIRecognitionConfig> aiConfigOptions) : IInferenceService
+public class InferenceService : IInferenceService
 {
+    private readonly IInferenceClient _client;
+    private readonly IAzaionApi _azaionApi;
+    private readonly IOptions<AIRecognitionConfig> _aiConfigOptions;
+
+    public InferenceService(IInferenceClient client,
+        IAzaionApi azaionApi,
+        IOptions<AIRecognitionConfig> aiConfigOptions)
+    {
+        _client = client;
+        _azaionApi = azaionApi;
+        _aiConfigOptions = aiConfigOptions;
+        _ = Task.Run(async () => await CheckAIAvailabilityStatus());
+    }
+
     public CancellationTokenSource InferenceCancelTokenSource { get; set; } = new();
     public CancellationTokenSource CheckAIAvailabilityTokenSource { get; set; } = new();
 
-    public async Task CheckAIAvailabilityStatus()
+    private async Task CheckAIAvailabilityStatus()
     {
         CheckAIAvailabilityTokenSource = new CancellationTokenSource();
         while (!CheckAIAvailabilityTokenSource.IsCancellationRequested)
         {
-            client.Send(RemoteCommand.Create(CommandType.AIAvailabilityCheck));
+            _client.Send(RemoteCommand.Create(CommandType.AIAvailabilityCheck));
             await Task.Delay(10000, CheckAIAvailabilityTokenSource.Token);
         }
     }
@@ -35,16 +47,16 @@ public class InferenceService(
     public async Task RunInference(List<string> mediaPaths, int tileSize, CancellationToken ct = default)
     {
         InferenceCancelTokenSource = new CancellationTokenSource();
-        client.Send(RemoteCommand.Create(CommandType.Login, azaionApi.Credentials));
+        _client.Send(RemoteCommand.Create(CommandType.Login, _azaionApi.Credentials));
 
-        var aiConfig = aiConfigOptions.Value;
+        var aiConfig = _aiConfigOptions.Value;
         aiConfig.Paths = mediaPaths;
         aiConfig.TileSize = tileSize;
-        client.Send(RemoteCommand.Create(CommandType.Inference, aiConfig));
+        _client.Send(RemoteCommand.Create(CommandType.Inference, aiConfig));
 
         using var combinedTokenSource = CancellationTokenSource.CreateLinkedTokenSource(ct, InferenceCancelTokenSource.Token);
         await combinedTokenSource.Token.AsTask();
     }
 
-    public void StopInference() => client.Stop();
+    public void StopInference() => _client.Stop();
 }
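Two things are worth noting about this refactor. First, the "SHOULD BE ONLY ONE INSTANCE" comment implies the service is registered as a singleton in the composition root; that registration is not part of this diff. Second, the constructor now starts the availability poll with a fire-and-forget Task.Run, and Task.Delay(10000, token) throws TaskCanceledException the moment the handler calls CancelAsync(), so in practice the loop exits through that exception rather than the while condition. A self-contained sketch of the pattern, with AvailabilityPoller and a plain Action standing in for InferenceService and _client.Send(RemoteCommand.Create(CommandType.AIAvailabilityCheck)) (hypothetical names, not from the commit):

using System;
using System.Threading;
using System.Threading.Tasks;

class AvailabilityPoller
{
    public CancellationTokenSource CheckAIAvailabilityTokenSource { get; set; } = new();

    private readonly Action _sendAvailabilityCheck;

    public AvailabilityPoller(Action sendAvailabilityCheck)
    {
        _sendAvailabilityCheck = sendAvailabilityCheck;
        // Fire-and-forget, as in the constructor above.
        _ = Task.Run(CheckAIAvailabilityStatus);
    }

    private async Task CheckAIAvailabilityStatus()
    {
        try
        {
            while (!CheckAIAvailabilityTokenSource.IsCancellationRequested)
            {
                _sendAvailabilityCheck();
                // Throws TaskCanceledException as soon as the event handler
                // calls CancelAsync(); that is how the loop normally ends.
                await Task.Delay(10_000, CheckAIAvailabilityTokenSource.Token);
            }
        }
        catch (OperationCanceledException)
        {
            // Expected once the status reaches Enabled or Error.
        }
    }
}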
@@ -7,6 +7,7 @@ AIStatus2Text = {
     AIAvailabilityEnum.CONVERTING: "Converting",
     AIAvailabilityEnum.UPLOADING: "Uploading",
     AIAvailabilityEnum.ENABLED: "Enabled",
+    AIAvailabilityEnum.WARNING: "Warning",
     AIAvailabilityEnum.ERROR: "Error",
 }
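The Python side mirrors the C# AIAvailabilityEnum member-for-member, so the new WARNING entry has to be added in both codebases; as far as this diff shows, keeping the two definitions in sync is manual.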
@@ -91,7 +91,7 @@ cdef class Inference:
             self.engine = TensorRTEngine(res.data)
             self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
         except Exception as e:
-            self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, <str>str(e))
+            self.ai_availability_status.set_status(AIAvailabilityEnum.WARNING, <str>str(e))
             onnx_engine_bytes = self.get_onnx_engine_bytes()
             self.ai_availability_status.set_status(AIAvailabilityEnum.CONVERTING)
             model_bytes = TensorRTEngine.convert_from_onnx(onnx_engine_bytes)
@@ -100,6 +100,7 @@ cdef class Inference:
             res = self.loader_client.upload_big_small_resource(model_bytes, <str> engine_filename, models_dir)
             if res.err is not None:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.ERROR, res.err)
+            else:
                 self.ai_availability_status.set_status(AIAvailabilityEnum.ENABLED)
     else:
         self.engine = OnnxEngine(<bytes>self.get_onnx_engine_bytes())
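Net effect of the Cython changes: a failed TensorRT engine load is now reported as Warning rather than Error before the service falls back to converting the ONNX model, and ENABLED is only set when the converted engine uploads cleanly. Previously the ENABLED call sat inside the error branch, immediately overwriting the Error status after a failed upload.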