llama : fix pre-tokenization of non-special added tokens ggerganov#8228


llama : fix mpt and olmo pre-tokenizer

llama : pre-tokenize non-special user-defined tokens first

llama : fix detection of control-like user-defined tokens

convert_hf : identify which user-defined tokens are control tokens

Only used in _set_vocab_gpt2() for now.

convert_hf : identify more added control tokens for SPM tokenizers

This makes Gemma and Gemma-2 tokenize pretty much EVERYTHING correctly,
including HTML tags and consecutive spaces,
but it unfortunately requires model re-conversion.

There seems to be a quirk in the HF tokenizer for Gemma:
it prefers the 16-space token over longer space tokens,
while the SentencePiece tokenizer does not do this.
(The implementation in llama.cpp behaves like SentencePiece here.)
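
A rough sketch of how to observe this with the HF tokenizer follows; it
assumes local access to a Gemma checkpoint, and the model id and exact
pieces printed are illustrative, not verified here.

# Sketch: tokenize a long space run with the HF Gemma tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/gemma-2b")
text = "a" + " " * 20 + "b"
print(tok.tokenize(text))
# HF tends to consume a 16-space piece first, then a shorter space piece;
# raw SentencePiece (and llama.cpp) can segment the same run differently.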

llama : fix wrong pre-tokenization of byte tokens

llama : fix Viking pre-tokenizer regex

The order was previously wrong, which caused errors in some tests.

llama : fix command-r detokenization
compilade authored and Nexesenex committed Jul 9, 2024
1 parent bf1c02b commit 8f980e1
Showing 4 changed files with 78 additions and 36 deletions.
71 changes: 57 additions & 14 deletions convert-hf-to-gguf.py
100755 → 100644
@@ -397,6 +397,29 @@ def from_model_architecture(cls, arch: str) -> type[Model]:
except KeyError:
raise NotImplementedError(f'Architecture {arch!r} not supported!') from None

def does_token_look_special(self, token: str | bytes) -> bool:
if isinstance(token, (bytes, bytearray)):
token_text = token.decode(encoding="utf-8")
elif isinstance(token, memoryview):
token_text = token.tobytes().decode(encoding="utf-8")
else:
token_text = token

# Some models mark some added tokens which ought to be control tokens as not special.
# (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
seems_special = token_text in (
"<pad>", # deepseek-coder
"<mask>", "<2mass>", "[@BOS@]", # gemma{,-2}
)

seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>")) # deepseek-coder

# TODO: should these be marked as UNUSED instead? (maybe not)
seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">")) # gemma{,-2}

return seems_special

# used for GPT-2 BPE and WordPiece vocabs
def get_vocab_base(self) -> tuple[list[str], list[int], str]:
tokens: list[str] = []
@@ -415,16 +438,18 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]:
for i in range(vocab_size):
if i not in reverse_vocab:
tokens.append(f"[PAD{i}]")
toktypes.append(gguf.TokenType.USER_DEFINED)
elif reverse_vocab[i] in added_vocab:
tokens.append(reverse_vocab[i])
if tokenizer.added_tokens_decoder[i].special:
toktypes.append(gguf.TokenType.CONTROL)
else:
toktypes.append(gguf.TokenType.USER_DEFINED)
toktypes.append(gguf.TokenType.UNUSED)
else:
tokens.append(reverse_vocab[i])
toktypes.append(gguf.TokenType.NORMAL)
token: str = reverse_vocab[i]
if token in added_vocab:
if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
toktypes.append(gguf.TokenType.CONTROL)
else:
token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
toktypes.append(gguf.TokenType.USER_DEFINED)
else:
toktypes.append(gguf.TokenType.NORMAL)
tokens.append(token)

return tokens, toktypes, tokpre

@@ -583,7 +608,7 @@ def _set_vocab_qwen(self):
for i in range(vocab_size):
if i not in reverse_vocab:
tokens.append(f"[PAD{i}]")
toktypes.append(gguf.TokenType.USER_DEFINED)
toktypes.append(gguf.TokenType.UNUSED)
elif reverse_vocab[i] in added_vocab:
tokens.append(reverse_vocab[i])
toktypes.append(gguf.TokenType.CONTROL)
@@ -660,6 +685,25 @@ def _set_vocab_sentencepiece(self):
scores[token_id] = -1000.0
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
if tokenizer_config_file.is_file():
with open(tokenizer_config_file, "r", encoding="utf-8") as f:
tokenizer_config_json = json.load(f)
added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
for token_id, token_data in added_tokens_decoder.items():
token_id = int(token_id)
token: str = token_data["content"]
if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
assert tokens[token_id] == token.encode("utf-8")
if token_data.get("special") or self.does_token_look_special(token):
toktypes[token_id] = SentencePieceTokenTypes.CONTROL
else:
token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED

scores[token_id] = -1000.0
tokens[token_id] = token.encode("utf-8")

if vocab_size > len(tokens):
pad_count = vocab_size - len(tokens)
logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
@@ -1290,7 +1334,7 @@ def set_vocab(self):
if (self.dir_model / "tokenizer.json").is_file():
self._set_vocab_gpt2()
else:
# StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
# StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
self._set_vocab_qwen()

def set_gguf_parameters(self):
@@ -1609,7 +1653,6 @@ def set_gguf_parameters(self):
self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])

self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
self.gguf_writer.add_file_type(self.ftype)

self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
@@ -2427,7 +2470,7 @@ class Gemma2Model(Model):
model_arch = gguf.MODEL_ARCH.GEMMA2

def set_vocab(self):
self._set_vocab_llama_hf()
self._set_vocab_sentencepiece()
self.gguf_writer.add_add_space_prefix(False)

def set_gguf_parameters(self):
@@ -3320,7 +3363,7 @@ def set_vocab(self):
for i in range(vocab_size):
if i not in reverse_vocab:
tokens.append(f"[PAD{i}]")
toktypes.append(gguf.TokenType.USER_DEFINED)
toktypes.append(gguf.TokenType.UNUSED)
elif reverse_vocab[i] in added_vocab:
tokens.append(reverse_vocab[i])
if tokenizer.added_tokens_decoder[i].special:
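
To make the new heuristic easier to follow outside the converter, here is a
minimal standalone sketch of the same checks; looks_special is a stand-in
for Model.does_token_look_special, and the sample added-vocab tokens are
illustrative.

# Classify a few added-vocab tokens the way get_vocab_base() now would.
def looks_special(token_text: str) -> bool:
    seems_special = token_text in ("<pad>", "<mask>", "<2mass>", "[@BOS@]")
    seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
    seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))
    return seems_special

for tok in ("<mask>", "<|user|>", "<unused42>", "▁▁▁▁", "hello"):
    kind = "CONTROL" if looks_special(tok) else "USER_DEFINED"
    # non-special added tokens also get "▁" pre-normalized to plain spaces
    text = tok if kind == "CONTROL" else tok.replace("▁", " ")
    print(f"{tok!r:14} -> {kind:12} stored as {text!r}")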
29 changes: 14 additions & 15 deletions llama.cpp
@@ -5537,6 +5537,7 @@ static void llm_load_vocab(
} else if (
tokenizer_pre == "command-r") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
vocab.tokenizer_clean_spaces = false;
} else if (
tokenizer_pre == "qwen2") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
@@ -5779,7 +5780,7 @@ static void llm_load_vocab(
// build special tokens cache
{
for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
vocab.cache_special_tokens.push_back(id);
}
}
@@ -15768,17 +15769,6 @@ struct llm_tokenizer_bpe {
"[0-9][0-9][0-9]",
};
break;
case LLAMA_VOCAB_PRE_TYPE_MPT:
// TODO: MPT pre-tokenization regexes are unknown
// the following are close, but not exact. run the following:
// ./bin/test-tokenizer-0 ../models/ggml-vocab-mpt.gguf
GGML_ASSERT("MPT pre-tokenization regexes are unknown - fixes needed");
regex_exprs = {
"\\s?\\p{L}+",
"\\s?\\p{P}+",
"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
};
break;
case LLAMA_VOCAB_PRE_TYPE_STARCODER:
case LLAMA_VOCAB_PRE_TYPE_REFACT:
case LLAMA_VOCAB_PRE_TYPE_COMMAND_R:
@@ -15788,6 +15778,7 @@
};
break;
case LLAMA_VOCAB_PRE_TYPE_GPT2:
case LLAMA_VOCAB_PRE_TYPE_MPT:
case LLAMA_VOCAB_PRE_TYPE_OLMO:
case LLAMA_VOCAB_PRE_TYPE_JAIS:
regex_exprs = {
@@ -15814,8 +15805,8 @@
break;
case LLAMA_VOCAB_PRE_TYPE_VIKING:
regex_exprs = {
"\\p{N}",
" ?[^(\\s|.,!?…。,、।۔،)]+",
"\\p{N}",
};
break;
default:
@@ -16535,12 +16526,20 @@ struct fragment_buffer_variant {

// #define PRETOKENIZERDEBUG

static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer, bool parse_special) {
// for each special token
for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
const auto & data = vocab.id_to_token[special_id];
const auto & special_token = data.text;

if (!parse_special && (data.attr & LLAMA_TOKEN_ATTR_CONTROL)) {
// Only ignore control tokens when parse_special == false
continue;
// User-defined tokens are still pre-tokenized before everything else
// ref: https://github.com/huggingface/tokenizers/blob/fdd26ba9a3f0c133427aab0423888cbde91362d7/tokenizers/src/tokenizer/mod.rs#L726
// This is mostly relevant for neox-style tokenizers (mpt, olmo, stablelm, etc.)
}

// for each text fragment
std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
while (it != buffer.end()) {
@@ -16653,7 +16652,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &

if (!raw_text.empty()) {
fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
if (parse_special) tokenizer_st_partition(vocab, fragment_buffer);
tokenizer_st_partition(vocab, fragment_buffer, parse_special);
}

switch (vocab.type) {
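
The effect of the tokenizer_st_partition() change can be modeled in
simplified Python: user-defined tokens always partition the text, while
control tokens are extracted only when parse_special is set. The token
sets below are illustrative; the real code walks fragment offsets over the
special-token cache rather than plain strings.

# Simplified model of the new special-token partitioning behavior.
CONTROL = {"<|endoftext|>"}
USER_DEFINED = {"  "}  # e.g. a multi-space token from a neox-style vocab

def partition(text: str, parse_special: bool) -> list[str]:
    specials = USER_DEFINED | (CONTROL if parse_special else set())
    fragments = [text]
    for special in sorted(specials, key=len, reverse=True):
        out: list[str] = []
        for frag in fragments:
            if frag in specials:  # already extracted as a special token
                out.append(frag)
                continue
            while (i := frag.find(special)) != -1:
                if i > 0:
                    out.append(frag[:i])
                out.append(special)  # kept whole; mapped to its token id later
                frag = frag[i + len(special):]
            if frag:
                out.append(frag)
        fragments = out
    return fragments

print(partition("hi  there<|endoftext|>", parse_special=False))
# ['hi', '  ', 'there<|endoftext|>']  (control token left for the BPE pass)
print(partition("hi  there<|endoftext|>", parse_special=True))
# ['hi', '  ', 'there', '<|endoftext|>']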
4 changes: 2 additions & 2 deletions tests/test-tokenizer-0.cpp
@@ -195,7 +195,7 @@ int main(int argc, char **argv) {
const bool add_special = false;

for (const auto & test_kv : k_tests) {
const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, true);
const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special, false);

printf("\n");
printf("src: '%s'\n", test_kv.first.c_str());
@@ -253,7 +253,7 @@
{
const auto t_start = ggml_time_us();

res = llama_tokenize(ctx, text, add_special, true);
res = llama_tokenize(ctx, text, add_special, false);

const auto t_end = ggml_time_us();

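
The tests now pass parse_special == false, exercising the new behavior in
which non-special user-defined tokens are still matched first. A rough
equivalent through the llama-cpp-python bindings might look like the
following (assumed API: the special flag of Llama.tokenize maps to
parse_special; the vocab path is hypothetical).

# Sketch: tokenizing with control-token parsing disabled.
from llama_cpp import Llama

llm = Llama(model_path="models/ggml-vocab-mpt.gguf", vocab_only=True)
# special=False: control tokens are not parsed out of the text, but
# non-special user-defined tokens are still pre-tokenized first.
ids = llm.tokenize(b"Hello  world", add_bos=False, special=False)
print(ids)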
10 changes: 5 additions & 5 deletions tests/test-tokenizer-random.py
@@ -16,7 +16,7 @@
from typing import Iterator

import cffi
from transformers import AutoTokenizer
from transformers import AutoTokenizer, PreTrainedTokenizer


logger = logging.getLogger("test-tokenizer-random")
@@ -125,7 +125,7 @@ def decode(self, ids: list[int]) -> str:
class TokenizerGroundtruth (Tokenizer):

def __init__(self, dir_tokenizer: str):
self.model = AutoTokenizer.from_pretrained(dir_tokenizer)
self.model: PreTrainedTokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
# guess BOS and EOS
ids = self.encode("a")
assert 1 <= len(ids) <= 3
@@ -139,7 +139,7 @@ def __init__(self, dir_tokenizer: str):
self.vocab = list(sorted(self.vocab))
# tokens and lists
self.special_tokens = list(self.model.all_special_tokens)
self.added_tokens = list(self.model.added_tokens_encoder)
self.added_tokens = self.model.batch_decode(self.model.added_tokens_encoder.values(), skip_special_tokens=False)
self.bos_token = self.model.bos_token
self.eos_token = self.model.eos_token

@@ -454,8 +454,8 @@ def check_detokenizer(text: str, text1: str, text2: str) -> bool:
i = find_first_mismatch(ids1, ids2)
ids1 = list(ids1)[max(0, i - 2) : i + 5 + 1]
ids2 = list(ids2)[max(0, i - 2) : i + 5 + 1]
logger.error(" Expected: " + str(ids1))
logger.error(" Result: " + str(ids2))
logger.error(" Expected: " + str(ids1) + f" {[tokenizer1.decode([id]) for id in ids1]}")
logger.error(" Result: " + str(ids2) + f" {[tokenizer2.decode([id]) for id in ids2]}")
encode_errors += 1
logger.error(f" {encode_errors=}")
if decode_errors < MAX_ERRORS and not check_detokenizer(text, text1, text2):
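
The switch to batch_decode matters because the keys of
added_tokens_encoder hold the raw added-token content, while decoding the
ids yields the surface form the tokenizer actually produces; the two can
differ for SPM-based vocabs. A small sketch follows (gpt2 is only an
illustrative public model; for BPE vocabs the two listings usually match).

# Compare raw added-token keys against their decoded surface forms.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
ids = list(tok.added_tokens_encoder.values())
print(list(tok.added_tokens_encoder))                    # raw key text
print(tok.batch_decode(ids, skip_special_tokens=False))  # decoded text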
