Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Support for Gemma2 #75

Merged
merged 2 commits into from
Aug 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
torch>=2.0.0
numpy>=1.24.2
transformers>=4.37.2
transformers>=4.42.0
tokenizers>=0.15.2
termcolor>=2.4.0
sentencepiece>=0.1.99
protobuf>=4.25.2
setuptools>=69.0.3
line_profiler
datasets
line_profiler
16 changes: 16 additions & 0 deletions tests/test_accept_token_sequence/test_gemma2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import unittest

from transformers import AutoTokenizer

from tests.test_accept_token_sequence._test_accept_tokens_mixin import (
TokenizerTesterMixin,
)


class Gemma2TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Token-acceptance tests for the Gemma-2 tokenizer.

    All test cases come from TokenizerTesterMixin; this subclass only
    pins which tokenizer class and pretrained checkpoint the mixin loads.
    """

    # AutoTokenizer resolves to the concrete Gemma tokenizer for this repo.
    tokenizer_class = AutoTokenizer
    pretrained_name = "google/gemma-2-2b-it"

    def setUp(self):
        # No Gemma-2-specific fixtures; defer entirely to the mixin's setup.
        super().setUp()
7 changes: 5 additions & 2 deletions tests/test_metrics/test_metric.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
import math
from unittest import TestCase

import torch
from transformers_cfg.metrics import ConstrainedDecodingMetric
from transformers_cfg.metrics.metrics import ConstrainedDecodingMetricOutput
from transformers_cfg.metrics.metrics import (
ConstrainedDecodingMetric,
ConstrainedDecodingMetricOutput,
)


class TestConstrainedDecodingMetric(TestCase):
Expand Down
2 changes: 2 additions & 0 deletions transformers_cfg/tokenization/SUPPORTED_TOKENIZERS.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
T5TokenizerFast,
CodeGenTokenizerFast,
PreTrainedTokenizerFast,
GemmaTokenizerFast
)

SUPPORTED_TOKENIZERS = {
Expand All @@ -14,4 +15,5 @@
T5TokenizerFast,
CodeGenTokenizerFast,
PreTrainedTokenizerFast,
GemmaTokenizerFast
}
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
CodeGenTokenizerFast,
LlamaTokenizerFast,
PreTrainedTokenizerFast,
GemmaTokenizerFast
)

from transformers_cfg.tokenization.utils import get_tokenizer_charset
Expand Down Expand Up @@ -38,7 +39,7 @@ def from_hf_tokenizer(cls, hf_tokenizer):
hf_tokenizer, (GPT2TokenizerFast, BartTokenizerFast, CodeGenTokenizerFast)
):
return GPT2TokenizerMiddleMapping(hf_tokenizer)
elif isinstance(hf_tokenizer, LlamaTokenizerFast):
elif isinstance(hf_tokenizer, (LlamaTokenizerFast, GemmaTokenizerFast)):
# deepseek, though inheriting from LlamaTokenizerFast, is actually a GPT2TokenizerFast
# check https://github.com/epfl-dlab/transformers-CFG/issues/72
if hf_tokenizer.name_or_path.startswith("deepseek-ai/deepseek-coder"):
Expand Down
3 changes: 2 additions & 1 deletion transformers_cfg/tokenization/tokenizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
T5TokenizerFast,
CodeGenTokenizerFast,
PreTrainedTokenizerFast,
GemmaTokenizerFast
)

from transformers_cfg.tokenization.SUPPORTED_TOKENIZERS import SUPPORTED_TOKENIZERS
Expand Down Expand Up @@ -53,7 +54,7 @@ def from_hf_tokenizer(cls, hf_tokenizer):
(GPT2TokenizerFast, BartTokenizerFast),
):
return TCFG_GPT2Tokenizer(hf_tokenizer)
elif isinstance(hf_tokenizer, (LlamaTokenizerFast, T5TokenizerFast)):
elif isinstance(hf_tokenizer, (LlamaTokenizerFast, GemmaTokenizerFast, T5TokenizerFast)):
return TCFG_LlamaTokenizer(hf_tokenizer)
elif isinstance(hf_tokenizer, CodeGenTokenizerFast):
# phi reuses the codegen tokenizer
Expand Down