Commit

use common_batch_add, reuse llama_batch in loop
ngxson committed Oct 12, 2024
1 parent b4c9911 commit 734f9e2
Showing 2 changed files with 12 additions and 14 deletions.
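Both files make the same change: instead of allocating a fresh llama_batch and filling its fields by hand on every iteration, they allocate one batch up front, refill it with the common_batch_clear/common_batch_add helpers from llama.cpp's common library, and free it once after the loop. For context, a minimal sketch of those helpers, paraphrased from common/common.cpp around the time of this commit (exact details may differ between versions):

    // common_batch_clear: keep the allocated buffers, just reset the token count.
    void common_batch_clear(struct llama_batch & batch) {
        batch.n_tokens = 0;
    }

    // common_batch_add: append one token to the batch, filling the same fields
    // the removed hand-written loop set (token, pos, logits, seq_id) and also
    // maintaining n_seq_id and n_tokens.
    void common_batch_add(
                     struct llama_batch & batch,
                            llama_token   id,
                              llama_pos   pos,
        const std::vector<llama_seq_id> & seq_ids,
                                   bool   logits) {
        batch.token   [batch.n_tokens] = id;
        batch.pos     [batch.n_tokens] = pos;
        batch.n_seq_id[batch.n_tokens] = seq_ids.size();
        for (size_t i = 0; i < seq_ids.size(); ++i) {
            batch.seq_id[batch.n_tokens][i] = seq_ids[i];
        }
        batch.logits  [batch.n_tokens] = logits;
        batch.n_tokens++;
    }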
13 changes: 6 additions & 7 deletions examples/imatrix/imatrix.cpp
@@ -496,6 +496,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
         // clear the KV cache
         llama_kv_cache_clear(ctx);
 
+        llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
             const int batch_size  = std::min(end - batch_start, n_batch);
@@ -508,12 +510,9 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
                 tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
             }
 
-            llama_batch batch = llama_batch_init(batch_size, 0, 1);
+            common_batch_clear(batch);
             for (int i = 0; i < batch_size; i++) {
-                batch.token [i]    = tokens[batch_start + i];
-                batch.pos   [i]    = j*n_batch + i;
-                batch.logits[i]    = true;
-                batch.seq_id[i][0] = 0;
+                common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
             }
 
             if (llama_decode(ctx, batch)) {
@@ -522,8 +521,6 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
                 return false;
             }
 
-            llama_batch_free(batch);
-
             // restore the original token in case it was set to BOS
             tokens[batch_start] = token_org;
 
@@ -533,6 +530,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
             }
         }
 
+        llama_batch_free(batch);
+
         const auto t_end = std::chrono::high_resolution_clock::now();
 
         if (i == 0) {
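Note the allocation pattern above: the batch is created once with capacity n_batch, the largest value batch_size can take (batch_size = std::min(end - batch_start, n_batch)), so a single allocation serves every iteration; common_batch_clear resets it between batches, and llama_batch_free runs once after the loop instead of once per iteration. The hunk below applies the identical rewrite to kl_divergence in perplexity.cpp.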
13 changes: 6 additions & 7 deletions examples/perplexity/perplexity.cpp
@@ -1800,6 +1800,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
         // clear the KV cache
         llama_kv_cache_clear(ctx);
 
+        llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
         for (int j = 0; j < num_batches; ++j) {
             const int batch_start = start + j * n_batch;
             const int batch_size  = std::min(end - batch_start, n_batch);
@@ -1812,12 +1814,9 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
                 tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
             }
 
-            llama_batch batch = llama_batch_init(batch_size, 0, 1);
+            common_batch_clear(batch);
             for (int i = 0; i < batch_size; i++) {
-                batch.token [i]    = tokens[batch_start + i];
-                batch.pos   [i]    = j*n_batch + i;
-                batch.logits[i]    = true;
-                batch.seq_id[i][0] = 0;
+                common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
             }
 
             if (llama_decode(ctx, batch)) {
@@ -1826,8 +1825,6 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
                 return;
             }
 
-            llama_batch_free(batch);
-
             // restore the original token in case it was set to BOS
             tokens[batch_start] = token_org;
 
@@ -1837,6 +1834,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
             }
         }
 
+        llama_batch_free(batch);
+
         const auto t_end = std::chrono::high_resolution_clock::now();
 
         if (i == 0) {
