Separate the load functionality from the inference client into a loader client. Call the loader client from the inference client to get the model.
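Roughly, the split could look like the minimal Python sketch below. All names in it (LoaderClient, InferenceClient, get_model) are illustrative assumptions, not identifiers taken from this repository.

# Illustrative sketch only: LoaderClient, InferenceClient and get_model are
# assumed names, not the actual classes in this repository.

class LoaderClient:
    """Owns model loading; the inference side no longer loads anything itself."""

    def __init__(self, model_dir):
        self.model_dir = model_dir
        self._cache = {}

    def get_model(self, name):
        # Load on first request, then reuse the cached instance.
        if name not in self._cache:
            self._cache[name] = self._load_from_disk(name)
        return self._cache[name]

    def _load_from_disk(self, name):
        # Placeholder for the real loading logic (weights, DLLs, and so on).
        return {"name": name, "path": f"{self.model_dir}/{name}"}


class InferenceClient:
    """Asks the loader client for a model instead of loading it directly."""

    def __init__(self, loader):
        self.loader = loader

    def run(self, model_name, prompt):
        model = self.loader.get_model(model_name)
        return f"ran {model['name']} on: {prompt}"


if __name__ == "__main__":
    loader = LoaderClient(model_dir="/models")
    inference = InferenceClient(loader)
    print(inference.run("example-model", "hello"))

The point of the split is that the inference client depends only on the loader's get_model call, so how models (and later DLLs) are located can change without touching inference code.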

Remove the dummy DLLs and remove the resource loader from the C# side.

TODO: load DLLs separately via the Loader UI and the loader client.
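The DLL-loading side of that TODO is not shown in this diff; if it ends up in the Python loader client, a minimal ctypes-based sketch (the function name and error handling here are assumptions) could look like:

# Assumed approach only: the commit does not show how DLLs will be loaded;
# this just illustrates loading a native library from Python with ctypes.
import ctypes
import os

def load_native_library(path):
    # Fail early with a clear message instead of a raw OSError deep in inference.
    if not os.path.exists(path):
        raise FileNotFoundError(f"native library not found: {path}")
    return ctypes.CDLL(path)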

WIP
Alex Bezdieniezhnykh
2025-06-06 20:04:03 +03:00
parent 500db31142
commit 7750025631
54 changed files with 353 additions and 571 deletions
+1 -26
@@ -1,34 +1,9 @@
 import os
 import subprocess
-import pynvml
 cdef class HardwareService:
     @staticmethod
-    cdef has_nvidia_gpu():
-        try:
-            pynvml.nvmlInit()
-            device_count = pynvml.nvmlDeviceGetCount()
-            if device_count > 0:
-                print(f"Found NVIDIA GPU(s).")
-                return True
-            else:
-                print("No NVIDIA GPUs found by NVML.")
-                return False
-        except pynvml.NVMLError as error:
-            print(f"Failed to find NVIDIA GPU")
-            return False
-        finally:
-            try:
-                pynvml.nvmlShutdown()
-            except:
-                print('Failed to shutdown pynvml cause probably no NVidia GPU')
-                pass
-    cdef str get_hardware_info(self):
+    cdef str get_hardware_info():
         if os.name == 'nt':  # windows
             os_command = (
                 "powershell -Command \""