llama : fix tokenizer to use llama_char_to_byte
ggerganov committed Aug 17, 2023
1 parent 0ba5d48 commit 7b6ae89
Showing 2 changed files with 23 additions and 10 deletions.
22 changes: 17 additions & 5 deletions llama.cpp
@@ -2303,6 +2303,18 @@ static uint8_t llama_byte_to_char(const llama_vocab & vocab, uint8_t byte) {
     return false;
 }
 
+static uint8_t llama_char_to_byte(const llama_vocab & vocab, uint8_t ch) {
+    if (llama_vocab_type(vocab) == "spm") {
+        return ch + 3;
+    }
+
+    if (llama_vocab_type(vocab) == "bpe") {
+        return ch - 32;
+    }
+
+    return false;
+}
+
 static std::string llama_escape_whitespace(const std::string& text) {
     std::string result;
     bool escaping = false;
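
The new helper is the inverse mapping of llama_byte_to_char: the tokenizer's byte fallback needs to turn a raw byte into its vocabulary id, not the other way around. Below is a minimal standalone sketch of why the SPM branch uses "+ 3", assuming the usual LLaMA SPM layout where ids 0-2 are <unk>, <s>, and </s> and the 256 byte tokens <0x00>..<0xFF> occupy ids 3-258; the helper names are hypothetical, since the real functions are static inside llama.cpp.

// sketch.cpp — hypothetical standalone illustration, not code from this commit
#include <cassert>
#include <cstdint>

// byte -> token id: <0x00>..<0xFF> sit right after <unk>, <s>, </s> (ids 0-2)
static int byte_to_spm_token_id(uint8_t byte) { return (int) byte + 3; }

// token id -> byte: the inverse direction
static uint8_t spm_token_id_to_byte(int id) { return (uint8_t) (id - 3); }

int main() {
    for (int b = 0; b < 256; ++b) {
        // the two helpers must be exact inverses of each other
        assert(spm_token_id_to_byte(byte_to_spm_token_id((uint8_t) b)) == b);
    }
    return 0;
}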
@@ -2439,7 +2451,7 @@ struct llama_tokenizer {
         if (p == rev_merge.end()) {
             // output any symbols that did not form tokens as bytes.
             for (int j = 0; j < (int)symbol.n; ++j) {
-                llama_vocab::id token_id = llama_byte_to_char(vocab_, symbol.text[j]);
+                llama_vocab::id token_id = llama_char_to_byte(vocab_, symbol.text[j]);
                 output.push_back(token_id);
             }
             return;
@@ -4871,8 +4883,8 @@ int llama_token_to_str_with_model(const struct llama_model * model, llama_token
     return 0;
 }
 
-int llama_token_to_str(const struct llama_context * ctx, llama_token token, char * str, int length) {
-    return llama_token_to_str_with_model(&ctx->model, token, str, length);
+int llama_token_to_str(const struct llama_context * ctx, llama_token token, char * buf, int length) {
+    return llama_token_to_str_with_model(&ctx->model, token, buf, length);
 }
 
 std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
@@ -4889,13 +4901,13 @@ std::string llama_token_to_str(const struct llama_context * ctx, llama_token tok
     return std::string(result.data(), result.size());
 }
 
-int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * str, int length) {
+int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * buf, int length) {
     if (0 <= token && token < llama_n_vocab_from_model(&ctx->model)) {
         std::string result = ctx->model.vocab.id_to_token[token].tok;
         if (length < (int) result.length()) {
             return -result.length();
         }
-        memcpy(str, result.c_str(), result.length());
+        memcpy(buf, result.c_str(), result.length());
         return result.length();
     }
     return 0;
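The str → buf rename is cosmetic, but the hunks above also show the function's buffer contract: a negative return value is the required length. A hedged sketch of the assumed caller-side pattern (not code from this commit):

#include <string>
#include <vector>
#include "llama.h"

// query-then-retry wrapper around llama_token_to_str (assumed usage pattern)
std::string token_to_str(llama_context * ctx, llama_token token) {
    std::vector<char> buf(8);
    int n = llama_token_to_str(ctx, token, buf.data(), (int) buf.size());
    if (n < 0) {
        buf.resize(-n); // -n is the length the token actually needs
        n = llama_token_to_str(ctx, token, buf.data(), (int) buf.size());
    }
    return std::string(buf.data(), n);
}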
11 changes: 6 additions & 5 deletions tests/test-tokenizer-0.cpp
@@ -89,6 +89,8 @@ int main(int argc, char **argv) {
         return 2;
     }
 
+    bool success = true;
+
     for (const auto & test_kv : k_tests()) {
         std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, true);
         fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
@@ -103,7 +105,8 @@ int main(int argc, char **argv) {
         }
 
         if (!correct) {
-            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : detokenized to: '%s'\n", __func__, unescape_whitespace(ctx, test_kv.second).c_str());
             fprintf(stderr, "%s : expected tokens: ", __func__);
             for (const auto & t : test_kv.second) {
                 fprintf(stderr, "%6d, ", t);
@@ -115,9 +118,7 @@ int main(int argc, char **argv) {
             }
             fprintf(stderr, "\n");
 
-            llama_free_model(model);
-            llama_free(ctx);
-            return 3;
+            success = false;
         }
     }

@@ -126,5 +127,5 @@ int main(int argc, char **argv) {
 
     llama_backend_free();
 
-    return 0;
+    return success ? 0 : 3;
 }
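
The test-harness change is independent of the tokenizer fix: instead of freeing the model and bailing out at the first mismatch, the test now records failures, reports every failing case in one run, and signals the overall result through the exit code. A minimal sketch of that pattern, with a hypothetical stand-in for the real token-vector comparison:

#include <cstdio>
#include <vector>

// hypothetical stand-in for one tokenizer test case
static bool run_one(int input) { return input % 2 == 0; }

int main() {
    const std::vector<int> tests = {2, 3, 4};
    bool success = true;
    for (int t : tests) {
        if (!run_one(t)) {
            fprintf(stderr, "failed test: %d\n", t); // report, but keep going
            success = false;                         // remember the failure
        }
    }
    return success ? 0 : 3; // same exit-code convention as the patched test
}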
