
Commit e46d268

llama : remove unused vars (ggerganov#4796)
ggerganov authored and jordankanter committed Feb 3, 2024
1 parent 3b99c26 commit e46d268
Showing 1 changed file with 0 additions and 2 deletions.
2 changes: 0 additions & 2 deletions llama.cpp
@@ -4997,7 +4997,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
 
         const int64_t n_rot = n_embd_head_k / 2;
Expand Down Expand Up @@ -5210,7 +5209,6 @@ struct llm_build_context {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);

const int64_t n_embd_head = hparams.n_embd_head_v;
const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

struct ggml_tensor * cur;
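For context (not part of the commit): both deleted lines declare n_embd_gqa from hparams.n_embd_v_gqa() but never read it afterwards, so the declarations are dead code. A minimal sketch below, using a hypothetical hparams_t stand-in with illustrative values, reproduces the kind of -Wunused-variable diagnostic that GCC and Clang emit for this pattern under -Wall:

    // unused_var_demo.cpp -- minimal sketch, not from llama.cpp.
    // hparams_t and its values are hypothetical stand-ins.
    // Compile with: g++ -Wall -c unused_var_demo.cpp
    #include <cstdint>

    struct hparams_t {
        int64_t n_embd_head_v = 128;                  // illustrative value
        int64_t n_embd_v_gqa() const { return 1024; } // illustrative value
    };

    int64_t build_graph(const hparams_t & hparams) {
        const int64_t n_embd_head = hparams.n_embd_head_v;
        // Declared but never read -> warning: unused variable 'n_embd_gqa' [-Wunused-variable]
        const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
        return n_embd_head;
    }

Deleting such declarations silences the warning without changing behavior, assuming the accessor is a side-effect-free getter, as simple hparams accessors typically are.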

0 comments on commit e46d268
