Write logs for inference and loader to a file

This commit is contained in:
Alex Bezdieniezhnykh
2025-06-14 16:08:32 +03:00
parent 8aa2f563a4
commit 6f297c4ebf
30 changed files with 218 additions and 140 deletions
+10 -10
View File
@@ -16,7 +16,7 @@ cdef int check_tensor_gpu_index():
deviceCount = pynvml.nvmlDeviceGetCount()
if deviceCount == 0:
print('No NVIDIA GPUs found.')
constants.logerror('No NVIDIA GPUs found.')
return -1
for i in range(deviceCount):
@@ -24,10 +24,10 @@ cdef int check_tensor_gpu_index():
major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
if major > 6 or (major == 6 and minor >= 1):
print('found NVIDIA GPU!')
constants.log('found NVIDIA GPU!')
return i
print('NVIDIA GPU doesnt support TensorRT!')
constants.logerror('NVIDIA GPU doesnt support TensorRT!')
return -1
except pynvml.NVMLError:
@@ -36,7 +36,7 @@ cdef int check_tensor_gpu_index():
try:
pynvml.nvmlShutdown()
except:
print('Failed to shutdown pynvml cause probably no NVidia GPU')
constants.logerror('Failed to shutdown pynvml cause probably no NVIDIA GPU')
pass
tensor_gpu_index = check_tensor_gpu_index()
@@ -70,15 +70,15 @@ cdef class Inference:
res = self.loader_client.load_big_small_resource(engine_filename, models_dir)
if res.err is None:
print('tensor rt engine is here, no need to build')
constants.log('tensor rt engine is here, no need to build')
self.is_building_engine = False
updater_callback('enabled')
return
print(res.err)
constants.logerror(res.err)
# time.sleep(8) # prevent simultaneously loading dll and models
updater_callback('converting')
print('try to load onnx')
constants.log('try to load onnx')
res = self.loader_client.load_big_small_resource(constants.AI_ONNX_MODEL_FILE, models_dir)
if res.err is not None:
updater_callback(f'Error. {res.err}')
@@ -87,7 +87,7 @@ cdef class Inference:
res = self.loader_client.upload_big_small_resource(model_bytes, <str> engine_filename, models_dir)
if res.err is not None:
updater_callback(f'Error. {res.err}')
print(f'uploaded {engine_filename} to CDN and API')
constants.log(f'uploaded {engine_filename} to CDN and API')
self.is_building_engine = False
updater_callback('enabled')
except Exception as e:
@@ -212,11 +212,11 @@ cdef class Inference:
# images first, it's faster
if len(images) > 0:
for chunk in self.split_list_extend(images, self.engine.get_batch_size()):
print(f'run inference on {" ".join(chunk)}...')
constants.log(f'run inference on {" ".join(chunk)}...')
self._process_images(cmd, ai_config, chunk)
if len(videos) > 0:
for v in videos:
print(f'run inference on {v}...')
constants.log(f'run inference on {v}...')
self._process_video(cmd, ai_config, v)