From 6738b852db7aa5d10905f2f5fc9ac5cc89d1f0b7 Mon Sep 17 00:00:00 2001
From: IlyasMoutawwakil
Date: Wed, 11 Dec 2024 13:47:12 +0100
Subject: [PATCH] fix

---
 optimum_benchmark/backends/tensorrt_llm/backend.py |  2 +-
 optimum_benchmark/backends/tensorrt_llm/config.py  | 11 ++++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/optimum_benchmark/backends/tensorrt_llm/backend.py b/optimum_benchmark/backends/tensorrt_llm/backend.py
index 8205b83d..2dbc0fea 100644
--- a/optimum_benchmark/backends/tensorrt_llm/backend.py
+++ b/optimum_benchmark/backends/tensorrt_llm/backend.py
@@ -137,7 +137,7 @@ def trtllm_kwargs(self):
         if self.config.max_prompt_length is not None:
             kwargs["max_prompt_length"] = self.config.max_prompt_length
 
-        if self.config.tp is not None:
+        if self.config.max_new_tokens is not None:
             kwargs["max_new_tokens"] = self.config.max_new_tokens
 
         if self.config.max_beam_width is not None:
diff --git a/optimum_benchmark/backends/tensorrt_llm/config.py b/optimum_benchmark/backends/tensorrt_llm/config.py
index 8a011bbd..84d119af 100644
--- a/optimum_benchmark/backends/tensorrt_llm/config.py
+++ b/optimum_benchmark/backends/tensorrt_llm/config.py
@@ -4,7 +4,7 @@
 from ...import_utils import tesnorrt_llm_version
 from ..config import BackendConfig
 
-SUPPORTED_DTYPES = ["float16", "bfloat16", "float32"]
+SUPPORTED_DTYPES = [None, "float16", "bfloat16", "float32"]
 
 
 @dataclass
@@ -38,8 +38,13 @@ def __post_init__(self) -> None:
         if self.dtype not in SUPPORTED_DTYPES:
             raise ValueError(f"dtype must be one of float16, bfloat16, float32, got {self.dtype}")
 
-        if self.gpus_per_node != self.world_size:
+        if self.gpus_per_node is not None and self.world_size is not None and self.gpus_per_node != self.world_size:
             raise ValueError(f"gpus_per_node ({self.gpus_per_node}) != world_size ({self.world_size})")
 
-        if self.world_size != self.pp * self.tp:
+        if (
+            self.world_size is not None
+            and self.pp is not None
+            and self.tp is not None
+            and self.world_size != self.pp * self.tp
+        ):
             raise ValueError(f"world_size ({self.gpus_per_node}) != pp ({self.pp}) * tp ({self.tp})")
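
The core pattern in this patch is None-safe validation: each cross-field check in __post_init__ only fires when every field it relates has been explicitly set, so an all-default (all-None) config passes and each None is left for TensorRT-LLM to fill in, which is also why None was added to SUPPORTED_DTYPES. Below is a minimal, self-contained sketch of that pattern; the class name TRTConfig and the reduced field set are hypothetical stand-ins for the backend's actual config class, and the sketch interpolates self.world_size in the last error message where the patched file's unchanged context line interpolates self.gpus_per_node.

    from dataclasses import dataclass
    from typing import Optional

    SUPPORTED_DTYPES = [None, "float16", "bfloat16", "float32"]


    @dataclass
    class TRTConfig:
        # hypothetical stand-in for the backend config; None means "defer to the engine default"
        dtype: Optional[str] = None
        gpus_per_node: Optional[int] = None
        world_size: Optional[int] = None
        pp: Optional[int] = None  # pipeline-parallel degree
        tp: Optional[int] = None  # tensor-parallel degree

        def __post_init__(self) -> None:
            if self.dtype not in SUPPORTED_DTYPES:
                raise ValueError(f"dtype must be one of {SUPPORTED_DTYPES}, got {self.dtype}")

            # cross-field constraints are only enforced when both sides were explicitly set
            if self.gpus_per_node is not None and self.world_size is not None and self.gpus_per_node != self.world_size:
                raise ValueError(f"gpus_per_node ({self.gpus_per_node}) != world_size ({self.world_size})")

            if (
                self.world_size is not None
                and self.pp is not None
                and self.tp is not None
                and self.world_size != self.pp * self.tp
            ):
                raise ValueError(f"world_size ({self.world_size}) != pp ({self.pp}) * tp ({self.tp})")


    TRTConfig()                            # all-None config is valid: nothing to check
    TRTConfig(world_size=4, pp=2, tp=2)    # consistent parallelism settings pass
    # TRTConfig(world_size=4, pp=2, tp=1)  # would raise ValueError: world_size (4) != pp (2) * tp (1)

Guarding each comparison this way keeps partially specified configs usable while still rejecting combinations that are genuinely inconsistent once all the relevant values are known.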