Fix: Nemotron tokenizer for GGUF format (huggingface#35836)
fix nemotron gguf
MekkCyber authored and elvircrn committed Feb 13, 2025
1 parent 96f7d49 commit 87a49ed
Showing 2 changed files with 3 additions and 2 deletions.
1 change: 1 addition & 0 deletions src/transformers/models/auto/tokenization_auto.py
@@ -339,6 +339,7 @@
         ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
         ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)),
         ("myt5", ("MyT5Tokenizer", None)),
+        ("nemotron", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)),
         ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
         (
             "nllb",
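With this entry in the auto-tokenizer mapping, AutoTokenizer can resolve a fast tokenizer for Nemotron checkpoints loaded from a GGUF file; before this commit the table had no "nemotron" entry. A minimal usage sketch, assuming the tokenizers library is installed; the repository and file names below are placeholders, not values taken from this commit:

from transformers import AutoTokenizer

# Placeholder GGUF checkpoint; substitute a real Nemotron GGUF repo and file name.
model_id = "org/Nemotron-GGUF"
gguf_filename = "nemotron-Q6_K.gguf"

# The new ("nemotron", (None, "PreTrainedTokenizerFast")) mapping lets this call
# resolve to PreTrainedTokenizerFast for the GGUF checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_filename)
print(type(tokenizer).__name__)  # expected: PreTrainedTokenizerFast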
4 changes: 2 additions & 2 deletions tests/quantization/ggml/test_ggml.py
@@ -835,9 +835,9 @@ def test_nemotron_q6_k(self):
 
         tokenizer = AutoTokenizer.from_pretrained(self.nemotron_model_id, gguf_file=self.q6_k_nemotron_model_id)
         text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
-        out = model.generate(text, max_new_tokens=10)
+        out = model.generate(text, max_new_tokens=16)
 
-        EXPECTED_TEXT = "'Hello. hotmail.com.'"
+        EXPECTED_TEXT = "Hello.hotmail.com</s>"
         self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
 
     def test_gemma2_q3_k(self):
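For reference, the surrounding test builds the model from the same GGUF file before generating. A rough sketch of that flow under assumed, placeholder checkpoint names (the real nemotron_model_id and q6_k_nemotron_model_id values are class attributes defined elsewhere in test_ggml.py and are not shown in this hunk):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "org/Nemotron-GGUF"        # placeholder repo id
gguf_filename = "nemotron-Q6_K.gguf"  # placeholder Q6_K file name

# Both the model weights and the tokenizer are reconstructed from the GGUF file.
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_filename)
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_filename)

input_ids = tokenizer("Hello", return_tensors="pt")["input_ids"]
out = model.generate(input_ids, max_new_tokens=16)
print(tokenizer.decode(out[0], skip_special_tokens=True))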
