mirror of
https://github.com/azaion/annotations.git
synced 2026-04-22 13:26:30 +00:00
Separate the load functionality from the inference client into a dedicated loader client; the inference client now calls the loader client to obtain the model.
Remove the dummy DLLs and the resource loader from the C# code. TODO (WIP): load DLLs separately via the Loader UI and the loader client.
This commit is contained in:
@@ -1,34 +1,9 @@
|
||||
import os
|
||||
import subprocess
|
||||
import pynvml
|
||||
|
||||
|
||||
cdef class HardwareService:
|
||||
|
||||
@staticmethod
cdef has_nvidia_gpu():
    """Return True if NVML reports at least one NVIDIA GPU, False otherwise.

    Initializes NVML, queries the device count, and always attempts to
    shut NVML down again in the finally block. Any NVML failure (e.g. no
    NVIDIA driver installed) is treated as "no GPU" rather than raised.
    """
    try:
        pynvml.nvmlInit()
        device_count = pynvml.nvmlDeviceGetCount()

        if device_count > 0:
            # Include the actual count; the original f-string had no placeholder.
            print(f"Found {device_count} NVIDIA GPU(s).")
            return True
        else:
            print("No NVIDIA GPUs found by NVML.")
            return False

    except pynvml.NVMLError as error:
        # nvmlInit/nvmlDeviceGetCount raise NVMLError when no driver/GPU exists;
        # surface the reason instead of discarding the bound exception.
        print(f"Failed to find NVIDIA GPU: {error}")
        return False
    finally:
        try:
            pynvml.nvmlShutdown()
        # Narrowed from a bare `except:` — shutdown only fails with an NVML
        # error when init never succeeded (probably no NVIDIA GPU present).
        except pynvml.NVMLError:
            print('Failed to shutdown pynvml cause probably no NVidia GPU')
|
||||
|
||||
cdef str get_hardware_info(self):
|
||||
cdef str get_hardware_info():
|
||||
if os.name == 'nt': # windows
|
||||
os_command = (
|
||||
"powershell -Command \""
|
||||
|
||||
Reference in New Issue
Block a user