From 5b1042f775e2f537add252d11cd4a1afc4993b9a Mon Sep 17 00:00:00 2001
From: Roger Wang <136131678+ywang96@users.noreply.github.com>
Date: Wed, 24 Jul 2024 01:05:09 -0700
Subject: [PATCH] [Bugfix] Fix token padding for chameleon (#6724)

Signed-off-by: Alvant
---
 vllm/model_executor/models/chameleon.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py
index 6ece95495a026..7659f598bab94 100644
--- a/vllm/model_executor/models/chameleon.py
+++ b/vllm/model_executor/models/chameleon.py
@@ -125,7 +125,8 @@ def input_processor_for_chameleon(ctx: InputContext, llm_inputs: LLMInputs):
 
     # Appending sep token for chat mode to follow default processor
     # behavior
-    new_prompt += tokenizer.sep_token
+    if new_prompt is not None:
+        new_prompt += tokenizer.sep_token
     new_token_ids += [CHAMELEON_SEP_TOKEN_ID]
 
     # NOTE: Create a defensive copy of the original inputs
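
Below is a minimal standalone sketch (not vLLM code) of the failure mode this
patch guards against: when a request supplies only prompt token ids, the text
prompt is None, so the unguarded `new_prompt += tokenizer.sep_token` raises a
TypeError. The names and the sep-token id value here are simplified
stand-ins for illustration, not the actual vllm/chameleon.py definitions.

    # Stand-ins for the real tokenizer.sep_token and sep token id;
    # the concrete values are assumptions made for this sketch only.
    SEP_TOKEN = "<sep>"
    CHAMELEON_SEP_TOKEN_ID = 8710

    def append_sep(new_prompt, new_token_ids):
        # Before the fix, `new_prompt += SEP_TOKEN` on a None prompt raised:
        #   TypeError: unsupported operand type(s) for +=: 'NoneType' and 'str'
        # The guard skips the string append for token-ids-only requests while
        # still padding the token id list.
        if new_prompt is not None:
            new_prompt += SEP_TOKEN
        new_token_ids += [CHAMELEON_SEP_TOKEN_ID]
        return new_prompt, new_token_ids

    # Text prompt present: sep token appended to both representations.
    print(append_sep("hello", [1, 2]))  # ('hello<sep>', [1, 2, 8710])
    # Token-ids-only request: prompt stays None, ids still get the sep id.
    print(append_sep(None, [1, 2]))     # (None, [1, 2, 8710])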