Mirror of https://github.com/azaion/annotations.git, synced 2026-04-22 12:56:30 +00:00
refactor external clients

make the model batch size a config parameter instead of a constant
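The config side of the change is not shown in this diff. A minimal sketch of what it might look like, assuming ai_config is built from a plain dict; the AIConfig class, from_dict helper, and the default value of 8 are hypothetical, only the model_batch_size field and the ai_config name appear in the hunks below:

    from dataclasses import dataclass

    @dataclass
    class AIConfig:
        # Replaces the compile-time constants.MODEL_BATCH_SIZE;
        # the default here is a guess, not the repo's actual value.
        model_batch_size: int = 8

        @classmethod
        def from_dict(cls, raw: dict) -> "AIConfig":
            # Fall back to the default when the key is absent,
            # so configs written before this change keep working.
            return cls(model_batch_size=int(raw.get("model_batch_size", 8)))

    ai_config = AIConfig.from_dict({"model_batch_size": 16})

The practical win is that batch size can now be tuned per deployment (e.g. per GPU memory budget) without rebuilding the Cython extension.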
@@ -132,7 +132,7 @@ cdef class Inference:
             images.append(m)
         # images first, it's faster
         if len(images) > 0:
-            for chunk in self.split_list_extend(images, constants.MODEL_BATCH_SIZE):
+            for chunk in self.split_list_extend(images, ai_config.model_batch_size):
                 print(f'run inference on {" ".join(chunk)}...')
                 self._process_images(cmd, ai_config, chunk)
         if len(videos) > 0:
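split_list_extend itself is outside this hunk. A plausible minimal implementation, assuming it simply yields consecutive chunks of at most `size` items (the last chunk may be shorter); this is a sketch, not the repository's code:

    def split_list_extend(items, size):
        # Yield slices of `items` no longer than `size`.
        for i in range(0, len(items), size):
            yield items[i:i + size]

    # e.g. list(split_list_extend(['a', 'b', 'c', 'd', 'e'], 2))
    # -> [['a', 'b'], ['c', 'd'], ['e']]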
@@ -158,7 +158,7 @@ cdef class Inference:
                 batch_frames.append(frame)
                 batch_timestamps.append(int(v_input.get(cv2.CAP_PROP_POS_MSEC)))

-                if len(batch_frames) == constants.MODEL_BATCH_SIZE:
+                if len(batch_frames) == ai_config.model_batch_size:
                     input_blob = self.preprocess(batch_frames)
                     outputs = self.session.run(None, {self.model_input: input_blob})
                     list_detections = self.postprocess(outputs, ai_config)
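For context, here is how the surrounding video loop plausibly fits together. This is a sketch under stated assumptions, not the repository's exact code: run_on_video and its parameters are hypothetical stand-ins for the Inference members visible in the hunk (session, model_input, preprocess, postprocess). It also flags the usual pitfall of an equality-based batch check: a final partial batch needs a separate flush after the read loop, outside the lines this diff shows.

    import cv2

    def run_on_video(path, session, model_input, preprocess, postprocess, ai_config):
        # Accumulate frames until the batch is full, then run one
        # ONNX Runtime inference call per batch, as in the hunk above.
        v_input = cv2.VideoCapture(path)
        batch_frames, batch_timestamps = [], []
        try:
            while True:
                ok, frame = v_input.read()
                if not ok:
                    break
                batch_frames.append(frame)
                batch_timestamps.append(int(v_input.get(cv2.CAP_PROP_POS_MSEC)))
                if len(batch_frames) == ai_config.model_batch_size:
                    input_blob = preprocess(batch_frames)
                    outputs = session.run(None, {model_input: input_blob})
                    list_detections = postprocess(outputs, ai_config)
                    # ... consume list_detections / batch_timestamps here,
                    # then reset for the next batch
                    batch_frames, batch_timestamps = [], []
            # A trailing batch shorter than model_batch_size never triggers
            # the equality check above, so it must be flushed here.
        finally:
            v_input.release()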