llama : apply K-cache roping for Falcon and Baichuan
ggerganov committed Sep 18, 2023
1 parent 0cbf3bf commit 7c1bdd0
Showing 1 changed file with 49 additions and 0 deletions.
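
llm_build_llama already re-ropes ("shifts") its K cache in place when cache cells have been moved; this commit adds the same mechanism to llm_build_baichaun and llm_build_falcon. Each graph gains a K_shift tensor holding one position delta per cache cell, and when kv_self.has_shift is set (or during an allocator measure pass, so the op is accounted for in the graph), ggml_rope_custom_inplace is applied to every layer's K-cache view, rotating the cached keys by exactly those deltas so they match what would have been computed at their new positions.

Below is a minimal standalone sketch of the underlying math for the mode-0 (adjacent-pair) RoPE layout used by LLaMA and Baichuan; it is illustrative only, not the ggml kernel. Because rotations at the same frequency compose additively, re-rotating a cached key by delta extra positions is equivalent to having applied RoPE at (old position + delta) in the first place. Falcon uses the NeoX layout (mode 2); a matching sketch follows the diff.

#include <cmath>
#include <vector>

// Minimal sketch (not the ggml kernel): re-rotate one cached key vector by
// `delta` positions using the mode-0 RoPE layout (adjacent element pairs).
static void rope_shift_norm(std::vector<float> & k, int delta,
                            float freq_base = 10000.0f, float freq_scale = 1.0f) {
    const int n_dims = (int) k.size();
    for (int i = 0; i < n_dims; i += 2) {
        // pair frequency: base^(-i/n_dims); the position (here: delta) is scaled by freq_scale
        const float theta = freq_scale * (float) delta * std::pow(freq_base, -(float) i / n_dims);
        const float c = std::cos(theta), s = std::sin(theta);
        const float x0 = k[i], x1 = k[i + 1];
        // composing this rotation with the one already baked in at the old
        // position yields the same key as RoPE at (old position + delta)
        k[i]     = x0*c - x1*s;
        k[i + 1] = x0*s + x1*c;
    }
}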
llama.cpp
@@ -2746,6 +2746,7 @@ static struct ggml_cgraph * llm_build_llama(
ggml_set_name(cur, "attention_norm_0");
}

// shift the entire K-cache if needed
if (do_rope_shift) {
ggml_build_forward_expand(gf,
ggml_rope_custom_inplace(ctx0,
@@ -2987,6 +2988,8 @@ static struct ggml_cgraph * llm_build_baichaun(
const int32_t n_tokens = batch.n_tokens;
const int32_t n_kv = llama_kv_cache_cell_max(kv_self);

const bool do_rope_shift = kv_self.has_shift || ggml_allocr_is_measure(lctx.alloc);

auto & buf_compute = lctx.buf_compute;

struct ggml_init_params params = {
@@ -3090,6 +3093,16 @@ static struct ggml_cgraph * llm_build_baichaun(
}
}

// K_shift
struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
ggml_allocr_alloc(lctx.alloc, K_shift);
if (!ggml_allocr_is_measure(lctx.alloc)) {
int * data = (int *) K_shift->data;
for (int i = 0; i < n_ctx; ++i) {
data[i] = kv_self.cells[i].delta;
}
}

for (int il = 0; il < n_layer; ++il) {
ggml_format_name(inpL, "layer_inp_%d", il);

@@ -3115,6 +3128,18 @@
ggml_set_name(cur, "attention_norm_0");
}

// shift the entire K-cache if needed
if (do_rope_shift) {
ggml_build_forward_expand(gf,
ggml_rope_custom_inplace(ctx0,
ggml_view_3d(ctx0, kv_self.k,
n_embd_head, n_head_kv, n_ctx,
ggml_element_size(kv_self.k)*n_embd_head,
ggml_element_size(kv_self.k)*n_embd_gqa,
ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il),
K_shift, n_embd_head, 0, 0, freq_base, freq_scale));
}

// self-attention
{
// compute Q and K and RoPE them
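
The K_shift tensor built above carries one position delta per cache cell and is filled only on real (non-measure) passes; kv_self.has_shift signals that cells have been moved and their cached keys still need re-roping. A toy sketch of that bookkeeping, purely illustrative: the delta and has_shift fields mirror the diff, while pos and the shift helper are assumptions about how the deltas get produced.

#include <vector>

// Toy model of the cache bookkeeping that feeds K_shift (illustrative only).
struct toy_kv_cell  { int pos = -1; int delta = 0; };
struct toy_kv_cache { std::vector<toy_kv_cell> cells; bool has_shift = false; };

// Move every cell whose position lies in [p0, p1) by `delta` positions and
// remember the shift, so the graph built above can re-rope the cached keys.
static void toy_cache_shift(toy_kv_cache & cache, int p0, int p1, int delta) {
    for (auto & cell : cache.cells) {
        if (cell.pos >= p0 && cell.pos < p1) {
            cell.pos   += delta;
            cell.delta += delta;   // ends up in K_shift, consumed by the rope op
            cache.has_shift = true;
        }
    }
}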
@@ -3362,6 +3387,8 @@ static struct ggml_cgraph * llm_build_falcon(
const int32_t n_tokens = batch.n_tokens;
const int32_t n_kv = llama_kv_cache_cell_max(kv_self);

const bool do_rope_shift = kv_self.has_shift || ggml_allocr_is_measure(lctx.alloc);

auto & buf_compute = lctx.buf_compute;

struct ggml_init_params params = {
@@ -3465,6 +3492,16 @@ static struct ggml_cgraph * llm_build_falcon(
}
}

// K_shift
struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
ggml_allocr_alloc(lctx.alloc, K_shift);
if (!ggml_allocr_is_measure(lctx.alloc)) {
int * data = (int *) K_shift->data;
for (int i = 0; i < n_ctx; ++i) {
data[i] = kv_self.cells[i].delta;
}
}

for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * attn_norm;

@@ -3476,6 +3513,18 @@
}
#endif // GGML_USE_CUBLAS

// shift the entire K-cache if needed
if (do_rope_shift) {
ggml_build_forward_expand(gf,
ggml_rope_custom_inplace(ctx0,
ggml_view_3d(ctx0, kv_self.k,
n_embd_head, n_head_kv, n_ctx,
ggml_element_size(kv_self.k)*n_embd_head,
ggml_element_size(kv_self.k)*n_embd_gqa,
ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il),
K_shift, n_embd_head, 2, 0, freq_base, freq_scale));
}

// self-attention
// TODO: refactor into common function (shared with LLaMA)
{
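
In the Falcon hunks the rope mode argument after n_embd_head is 2 rather than the 0 used for Baichuan: mode 2 selects the NeoX-style layout, in which element i is paired with element i + n_dims/2 instead of its neighbour. A matching standalone sketch, again illustrative rather than the ggml kernel:

#include <cmath>
#include <vector>

// Minimal sketch of the NeoX-style (mode 2) rotation used for Falcon above:
// element i is rotated together with element i + n_dims/2.
static void rope_shift_neox(std::vector<float> & k, int delta,
                            float freq_base = 10000.0f, float freq_scale = 1.0f) {
    const int n_dims = (int) k.size();
    const int half   = n_dims/2;
    for (int i = 0; i < half; ++i) {
        const float theta = freq_scale * (float) delta * std::pow(freq_base, -2.0f*i/n_dims);
        const float c = std::cos(theta), s = std::sin(theta);
        const float x0 = k[i], x1 = k[i + half];
        k[i]        = x0*c - x1*s;
        k[i + half] = x0*s + x1*c;
    }
}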
