Read CDN YAML config from API

Automate TensorRT model conversion when no existing engine matches the user's GPU
Alex Bezdieniezhnykh
2025-04-23 23:20:08 +03:00
parent c68c293448
commit e798af470b
23 changed files with 265 additions and 93 deletions
+64 -20
@@ -6,6 +6,7 @@ import onnxruntime as onnx
 import tensorrt as trt
 import pycuda.driver as cuda
 import pycuda.autoinit  # required to automatically initialize CUDA; do not remove.
+import pynvml


 cdef class InferenceEngine:
@@ -28,13 +29,13 @@ cdef class InferenceEngine:
 cdef class OnnxEngine(InferenceEngine):
     def __init__(self, model_bytes: bytes, batch_size: int = 1, **kwargs):
         super().__init__(model_bytes, batch_size)
-        self.batch_size = batch_size
         self.session = onnx.InferenceSession(model_bytes, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
         self.model_inputs = self.session.get_inputs()
         self.input_name = self.model_inputs[0].name
         self.input_shape = self.model_inputs[0].shape
-        if self.input_shape[0] != -1:
-            self.batch_size = self.input_shape[0]
+        self.batch_size = self.input_shape[0] if self.input_shape[0] != -1 else batch_size
         print(f'AI detection model input: {self.model_inputs} {self.input_shape}')
         model_meta = self.session.get_modelmeta()
         print("Metadata:", model_meta.custom_metadata_map)
@@ -57,25 +58,12 @@ cdef class OnnxEngine(InferenceEngine):
 cdef class TensorRTEngine(InferenceEngine):
     def __init__(self, model_bytes: bytes, batch_size: int = 4, **kwargs):
         super().__init__(model_bytes, batch_size)
-        self.batch_size = batch_size
-        print('Enter init TensorRT')
         try:
             logger = trt.Logger(trt.Logger.WARNING)
-            metadata_len = struct.unpack("<I", model_bytes[:4])[0]
-            try:
-                metadata = json.loads(model_bytes[4:4 + metadata_len])
-                print(f"Model metadata: {json.dumps(metadata, indent=2)}")
-                string_dict = metadata['names']
-                self.class_names = {int(k): v for k, v in string_dict.items()}
-            except json.JSONDecodeError:
-                print(f"Failed to parse metadata")
-                return
-            engine_data = model_bytes[4 + metadata_len:]
             runtime = trt.Runtime(logger)
-            engine = runtime.deserialize_cuda_engine(engine_data)
+            engine = runtime.deserialize_cuda_engine(model_bytes)
             if engine is None:
                 raise RuntimeError(f"Failed to load TensorRT engine from bytes")
@@ -84,8 +72,7 @@ cdef class TensorRTEngine(InferenceEngine):
             # input
             self.input_name = engine.get_tensor_name(0)
             engine_input_shape = engine.get_tensor_shape(self.input_name)
-            if engine_input_shape[0] != -1:
-                self.batch_size = engine_input_shape[0]
+            self.batch_size = engine_input_shape[0] if engine_input_shape[0] != -1 else batch_size

             self.input_shape = [
                 self.batch_size,
@@ -101,7 +88,7 @@ cdef class TensorRTEngine(InferenceEngine):
             self.output_name = engine.get_tensor_name(1)
             engine_output_shape = tuple(engine.get_tensor_shape(self.output_name))
             self.output_shape = [
-                batch_size if self.input_shape[0] == -1 else self.input_shape[0],
+                self.batch_size,
                 300 if engine_output_shape[1] == -1 else engine_output_shape[1],  # max detections number
                 6 if engine_output_shape[2] == -1 else engine_output_shape[2]  # x1 y1 x2 y2 conf cls
             ]
@@ -113,6 +100,63 @@ cdef class TensorRTEngine(InferenceEngine):
         except Exception as e:
             raise RuntimeError(f"Failed to initialize TensorRT engine: {str(e)}")

+    @staticmethod
+    cdef unsigned long long get_gpu_memory_bytes(device_id=0):
+        # Query total GPU memory via NVML; fall back to 2 GiB if the query fails.
+        total_memory = None
+        try:
+            pynvml.nvmlInit()
+            handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
+            mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+            total_memory = mem_info.total
+        except pynvml.NVMLError:
+            total_memory = None
+        finally:
+            try:
+                pynvml.nvmlShutdown()
+            except pynvml.NVMLError:
+                pass
+        return 2 * 1024 * 1024 * 1024 if total_memory is None else total_memory  # default 2 GiB
+
+    @staticmethod
+    cdef str get_engine_filename(device_id=0):
+        # Key the engine file on compute capability and SM count, so a cached
+        # engine is only reused on a matching GPU.
+        try:
+            device = cuda.Device(device_id)
+            sm_count = device.multiprocessor_count
+            cc_major, cc_minor = device.compute_capability()
+            return f"azaion.cc_{cc_major}.{cc_minor}_sm_{sm_count}.engine"
+        except Exception:
+            return None
+
+    @staticmethod
+    cdef bytes convert_from_onnx(bytes onnx_model):
+        # Build a serialized TensorRT engine from an in-memory ONNX model,
+        # giving the builder up to 90% of total GPU memory as workspace.
+        cdef unsigned long long workspace_bytes = int(TensorRTEngine.get_gpu_memory_bytes() * 0.9)
+        explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
+        trt_logger = trt.Logger(trt.Logger.WARNING)
+        with trt.Builder(trt_logger) as builder, \
+                builder.create_network(explicit_batch_flag) as network, \
+                trt.OnnxParser(network, trt_logger) as parser, \
+                builder.create_builder_config() as config:
+            config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_bytes)
+            if not parser.parse(onnx_model):
+                return None
+            if builder.platform_has_fast_fp16:
+                print('Converting to fp16 (supported on this platform)')
+                config.set_flag(trt.BuilderFlag.FP16)
+            else:
+                print('Converting to fp32 (fp16 is not supported on this platform)')
+            plan = builder.build_serialized_network(network, config)
+            if plan is None:
+                print('Conversion failed.')
+                return None
+            return bytes(plan)
+
     cdef tuple get_input_shape(self):
         return self.input_shape[2], self.input_shape[3]
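
A minimal sketch of how these pieces could implement the commit message ("convert when no existing engine matches the user's GPU"). The calling code is not part of this hunk, so everything below is illustrative: load_or_build_engine and ENGINE_CACHE_DIR are hypothetical names, and it assumes the cdef static methods are reachable from Python (e.g. exposed as cpdef).

import os

ENGINE_CACHE_DIR = "/var/cache/azaion"  # hypothetical cache location

def load_or_build_engine(onnx_model: bytes) -> bytes:
    # Reuse a cached engine built for this exact GPU; otherwise convert once.
    filename = TensorRTEngine.get_engine_filename()  # e.g. azaion.cc_8.6_sm_28.engine
    if filename is None:
        raise RuntimeError("Could not query GPU properties")
    path = os.path.join(ENGINE_CACHE_DIR, filename)
    if os.path.exists(path):
        with open(path, "rb") as f:
            return f.read()
    plan = TensorRTEngine.convert_from_onnx(onnx_model)  # may take minutes on first run
    if plan is None:
        raise RuntimeError("ONNX -> TensorRT conversion failed")
    os.makedirs(ENGINE_CACHE_DIR, exist_ok=True)
    with open(path, "wb") as f:
        f.write(plan)  # cache for later runs on the same GPU
    return plan

Keying the filename on compute capability and SM count mirrors the fact that serialized TensorRT engines are not portable across GPU architectures.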