Fix model-conversion initialization (add explicit str casts to log calls)

This commit is contained in:
Oleksandr Bezdieniezhnykh
2025-09-04 10:39:41 +03:00
parent b3665630ed
commit be77a81875
8 changed files with 97 additions and 41 deletions
+3 -3
View File
@@ -100,14 +100,14 @@ cdef class TensorRTEngine(InferenceEngine):
return None
if builder.platform_has_fast_fp16:
constants_inf.log('Converting to supported fp16')
constants_inf.log(<str>'Converting to supported fp16')
config.set_flag(trt.BuilderFlag.FP16)
else:
constants_inf.log('Converting to supported fp32. (fp16 is not supported)')
constants_inf.log(<str>'Converting to supported fp32. (fp16 is not supported)')
plan = builder.build_serialized_network(network, config)
if plan is None:
constants_inf.logerror('Conversion failed.')
constants_inf.logerror(<str>'Conversion failed.')
return None
constants_inf.log('conversion done!')
return bytes(plan)