diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
index 5f3f1e286990e4..a6ea2b06a5e5ad 100644
--- a/ggml/include/ggml-backend.h
+++ b/ggml/include/ggml-backend.h
@@ -232,6 +232,11 @@ extern "C" {
     GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
     GGML_API void ggml_backend_view_init(struct ggml_tensor * tensor);
 
+    // Utility to query whether a cached GGML graph is in use
+    GGML_API bool ggml_use_cached_graph(ggml_backend_sched_t sched);
+
+    // Set whether or not to use GGML graph caching
+    GGML_API void ggml_set_cached_graph(ggml_backend_sched_t sched, bool set_value);
 
 #ifdef __cplusplus
 }
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index e1d2b110fd347e..1219009c9dca86 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -571,6 +571,13 @@ extern "C" {
         GGML_TENSOR_FLAG_PARAM  = 4,
     };
 
+    // Flag (used on GGML_OP_CPY nodes) indicating whether the node is associated with the K or V cache
+    // enum ggml_kv_cache_flag {
+    //     GGML_KV_CACHE_FLAG_NONE = 0,
+    //     GGML_KV_CACHE_FLAG_K    = 1,
+    //     GGML_KV_CACHE_FLAG_V    = 2
+    // };
+
     // ggml object
     struct ggml_object {
         size_t offs;
@@ -605,6 +612,8 @@ extern "C" {
         // op params - allocated as int32_t for alignment
         int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];
 
+        // enum ggml_kv_cache_flag kv_cache_flag;
+
         int32_t flags;
 
         struct ggml_tensor * grad;
@@ -620,7 +629,7 @@ extern "C" {
 
         void * extra; // extra things e.g. for ggml-cuda.cu
 
-        // char padding[4];
+        //char padding[4];
     };
 
     static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c
index 8856967c911042..b96b720a07c340 100644
--- a/ggml/src/ggml-backend.c
+++ b/ggml/src/ggml-backend.c
@@ -1036,6 +1036,13 @@ struct ggml_backend_sched_split {
     struct ggml_cgraph graph;
 };
 
+// Object to facilitate GGML graph caching
+struct ggml_cached_graph {
+    bool is_active;
+    ggml_backend_t input_backend;
+    struct ggml_tensor * input_cpy[GGML_SCHED_MAX_SPLIT_INPUTS];
+};
+
 struct ggml_backend_sched {
     bool is_reset; // true if the scheduler has been reset since the last graph split
     bool is_alloc;
@@ -1081,6 +1088,18 @@ struct ggml_backend_sched {
     size_t context_buffer_size;
 
     bool debug;
+
+    // align context_buffer to GGML_MEM_ALIGN
+
+    // #ifdef _MSC_VER
+    //     __declspec(align(GGML_MEM_ALIGN))
+    // #else
+    //     __attribute__((aligned(GGML_MEM_ALIGN)))
+    // #endif
+
+    // char context_buffer[GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)];
+
+    struct ggml_cached_graph cached_graph;
 };
 
 #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
@@ -1758,6 +1777,14 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
             struct ggml_tensor * input = split->inputs[j];
             struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
 
+            if (!sched->cached_graph.is_active) {
+                sched->cached_graph.input_backend = input_backend;
+                sched->cached_graph.input_cpy[j] = input_cpy;
+            }
+            else {
+                input_backend = sched->cached_graph.input_backend;
+                input_cpy = sched->cached_graph.input_cpy[j];
+            }
             if (input->flags & GGML_TENSOR_FLAG_INPUT) {
                 // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
                 if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
@@ -1890,6 +1917,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
 
     ggml_backend_sched_reset(sched);
 
+    sched->cached_graph.is_active = false;
+
     return sched;
 }
 
@@ -1966,6 +1995,9 @@ enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
 }
 
 enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+
+    if (!sched->cached_graph.is_active)
+    {
     if (!sched->is_reset && !sched->is_alloc) {
         ggml_backend_sched_reset(sched);
     }
@@ -1975,7 +2007,7 @@ enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
             return GGML_STATUS_ALLOC_FAILED;
         }
     }
-
+    }
     return ggml_backend_sched_compute_splits(sched);
 }
@@ -2240,3 +2272,12 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
 
     return true;
 }
+
+bool ggml_use_cached_graph(ggml_backend_sched_t sched) {
+    return sched->cached_graph.is_active;
+}
+
+void ggml_set_cached_graph(ggml_backend_sched_t sched, bool set_value) {
+    sched->cached_graph.is_active = set_value;
+}
+
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index aca1268cdc7e9e..97dbd937da5cef 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -3810,6 +3810,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
         /*.nb           =*/ { 0, 0, 0, 0 },
         /*.op           =*/ GGML_OP_NONE,
         /*.op_params    =*/ { 0 },
+        // /*.kv_cache_flag=*/ GGML_KV_CACHE_FLAG_NONE,
         /*.flags        =*/ 0,
         /*.grad         =*/ NULL,
         /*.src          =*/ { NULL },
diff --git a/src/llama.cpp b/src/llama.cpp
index 0bb2bef116056d..085d317130fd22 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2716,6 +2716,17 @@ struct llama_model {
     }
 };
 
+// Object used to allow caching of the GGML graph between tokens where possible.
+struct ggml_cached_graph {
+    bool is_active = false;
+    ggml_cgraph * gf;
+    size_t n;
+    ggml_backend_t backend_res;
+    ggml_backend_t backend_embd;
+    struct ggml_tensor * res;
+    struct ggml_tensor * embd;
+};
+
 struct llama_context {
     llama_context(const llama_model & model)
         : model(model)
@@ -2816,6 +2827,10 @@ struct llama_context {
     struct ggml_tensor * inp_pos_bucket;    // I32 [n_batch|n_kv, n_batch]
     struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
    struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
+
+    // cached GGML graph, to allow reuse between tokens where possible
+    struct ggml_cached_graph cached_graph;
+
 };
 
 struct llama_lora_weight {
@@ -8058,7 +8073,7 @@ static void llm_build_kv_store(
     cb(k_cache_view, "k_cache_view", il);
 
     // note: storing RoPE-ed version of K in the KV cache
-    ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
+    ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
 
     assert(v_cur->ne[0] == n_embd_v_gqa && v_cur->ne[1] == n_tokens);
@@ -8076,7 +8091,6 @@ static void llm_build_kv_store(
         v_cur = ggml_transpose(ctx, v_cur);
     }
     cb(v_cache_view, "v_cache_view", il);
-
     ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur, v_cache_view));
 }
@@ -15198,12 +15212,44 @@ static int llama_decode_internal(
         ggml_backend_sched_reset(lctx.sched);
         ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
 
-        ggml_cgraph * gf = llama_build_graph(lctx, u_batch, false);
-
+        ggml_cgraph * gf;
         // the output is always the last tensor in the graph
-        struct ggml_tensor * res  = gf->nodes[gf->n_nodes - 1];
-        struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];
+        struct ggml_tensor * res;
+        struct ggml_tensor * embd;
+
+        bool n_has_changed_since_last_token = false;
+        if (lctx.cached_graph.n != kv_self.n) n_has_changed_since_last_token = true;
+        lctx.cached_graph.n = kv_self.n;
+
+        // Re-build the graph only if graph caching is not possible
+        if (!ggml_use_cached_graph(lctx.sched) || n_has_changed_since_last_token) {
+
+            gf = llama_build_graph(lctx, u_batch, false);
+
+            // Set whether GGML graph caching is in use within the GGML module, based on
+            // whether caching was activated here during the previous token
+            ggml_set_cached_graph(lctx.sched, lctx.cached_graph.is_active);
+
+            // Disable future graph caching in the presence of the env var,
+            // if there are multiple devices, if the batch size is greater than 1,
+            // or if the number of splits is not 2.
+            // TODO: enable graph caching for these cases
+            bool disable_cached_ggml_graph = (getenv("GGML_DISABLE_GRAPH_CACHING") != nullptr)
+                || (llama_get_device_count(model) > 1)
+                || (ggml_backend_sched_get_n_splits(lctx.sched) != 2);
+            for (int i = 0; i < gf->n_nodes; i++) {
+                if (gf->nodes[i]->op == GGML_OP_ADD && gf->nodes[i]->src[1] && gf->nodes[i]->src[1]->ne[1] > 1) {
+                    disable_cached_ggml_graph = true;
+                    break;
+                }
+            }
+
+            // Set whether graph caching should be used for future tokens
+            lctx.cached_graph.is_active = !disable_cached_ggml_graph;
+            // the output is always the last tensor in the graph
+            res  = gf->nodes[gf->n_nodes - 1];
+            embd = gf->nodes[gf->n_nodes - 2];
 
         if (lctx.n_outputs == 0) {
             // no output
             res  = nullptr;
@@ -15222,10 +15268,59 @@ static int llama_decode_internal(
             embd = nullptr; // do not extract embeddings when not needed
             GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
         }
+            lctx.cached_graph.res  = res;
+            lctx.cached_graph.embd = embd;
 
         // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
 
         ggml_backend_sched_alloc_graph(lctx.sched, gf);
+        }
+        else {
+            gf = lctx.cached_graph.gf;
+            res = lctx.cached_graph.res;
+            embd = lctx.cached_graph.embd;
+        }
+        lctx.cached_graph.gf = gf;
+
+        // Update the K and V cache parameters in the cached graph.
+        if (gf != nullptr && gf->nodes != nullptr && ggml_use_cached_graph(lctx.sched)) {
+
+            const struct llama_hparams & hparams = model.hparams;
+            const int64_t kv_head = kv_self.head;
+
+            for (int i = 0; i < gf->n_nodes; i++) {
+                ggml_tensor * node = gf->nodes[i];
+                if (node->op == GGML_OP_CPY) {
+
+                    // K cache
+                    const char * k_prefix = "k_cache_view-";
+                    if (strncmp(node->src[1]->name, k_prefix, strlen(k_prefix)) == 0) {
+                        int il = atoi(node->src[1]->name + strlen(k_prefix)); // layer index from name
+                        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+                        ggml_tensor * tmp_tensor = kv_self.k_l[il];
+                        size_t tmp_offset = (ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa))*kv_head;
+                        node->src[1]->data = static_cast<char *>(tmp_tensor->data) + tmp_offset;
+                    }
+
+                    // V cache
+                    const char * v_prefix = "v_cache_view-";
+                    if (strncmp(node->src[1]->name, v_prefix, strlen(v_prefix)) == 0) {
+                        int il = atoi(node->src[1]->name + strlen(v_prefix)); // layer index from name
+                        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+                        ggml_tensor * tmp_tensor = kv_self.v_l[il];
+                        size_t tmp_offset;
+                        if (cparams.flash_attn) {
+                            tmp_offset = (kv_head)*ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
+                        } else {
+                            tmp_offset = (kv_head)*ggml_element_size(kv_self.v_l[il]);
+                        }
+                        node->src[1]->data = static_cast<char *>(tmp_tensor->data) + tmp_offset;
+                    }
+                }
+            }
+
+        }
+
     llama_set_inputs(lctx, u_batch);
 
     llama_graph_compute(lctx, gf, n_threads);
@@ -15248,11 +15343,15 @@ static int llama_decode_internal(
         // extract logits
         if (res) {
             ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
-            GGML_ASSERT(backend_res != nullptr);
-            GGML_ASSERT(lctx.logits != nullptr);
-
             float * logits_out = lctx.logits + n_outputs_prev*n_vocab;
             const int32_t n_outputs_new = lctx.n_outputs;
+            if (!ggml_use_cached_graph(lctx.sched))
+                lctx.cached_graph.backend_res = backend_res;
+            else
+                backend_res = lctx.cached_graph.backend_res;
+
+            GGML_ASSERT(backend_res != nullptr);
+            GGML_ASSERT(lctx.logits != nullptr);
 
             if (n_outputs_new) {
                 GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
@@ -15264,6 +15363,12 @@ static int llama_decode_internal(
         // extract embeddings
         if (embd) {
             ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
+
+            if (!ggml_use_cached_graph(lctx.sched))
+                lctx.cached_graph.backend_embd = backend_embd;
+            else
+                backend_embd = lctx.cached_graph.backend_embd;
+
             GGML_ASSERT(backend_embd != nullptr);
 
             switch (cparams.pooling_type) {
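
Note (not part of the patch): a minimal sketch of the call pattern the new scheduler-level API is meant to support. build_graph() is a hypothetical stand-in for llama_build_graph(); the per-token work that llama_decode_internal still does on every iteration (llama_set_inputs() and the K/V view fix-ups above) is elided, and the caller is assumed to have already verified that reuse is safe (single device, two splits, batch size 1).

```c
// Sketch only: drives the ggml_use_cached_graph()/ggml_set_cached_graph() API
// added by this patch across a sequence of tokens.
#include "ggml.h"
#include "ggml-backend.h"

extern struct ggml_cgraph * build_graph(void); // hypothetical graph builder

enum ggml_status decode_tokens(ggml_backend_sched_t sched, int n_tokens) {
    struct ggml_cgraph * gf = NULL;
    for (int t = 0; t < n_tokens; t++) {
        if (!ggml_use_cached_graph(sched)) {
            // no reusable graph yet: build it and let the scheduler split/allocate it
            gf = build_graph();
            ggml_backend_sched_reset(sched);
            if (!ggml_backend_sched_alloc_graph(sched, gf)) {
                return GGML_STATUS_ALLOC_FAILED;
            }
            // opt in to reuse for the following tokens (real code first checks
            // that reuse is safe, as llama_decode_internal does above)
            ggml_set_cached_graph(sched, true);
        }
        // with caching active, ggml_backend_sched_graph_compute_async() skips its
        // reset/alloc path and goes straight to executing the existing splits
        enum ggml_status status = ggml_backend_sched_graph_compute_async(sched, gf);
        if (status != GGML_STATUS_SUCCESS) {
            return status;
        }
    }
    return GGML_STATUS_SUCCESS;
}
```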
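Note (also not part of the patch): a small self-contained illustration of the offset arithmetic used above when re-pointing the cached k_cache_view-*/v_cache_view-* copy destinations. The tensor type and dimensions are made-up example values, not taken from any model.

```c
// Each decoded token writes one row of length n_embd_k_gqa per layer into the
// K cache, so when the cached graph is reused the destination of the K-copy
// node must be advanced to kv_head rows into that layer's K buffer.
#include <stdio.h>
#include "ggml.h"

int main(void) {
    const int64_t n_embd_k_gqa = 1024; // hypothetical width of one K-cache row
    const int64_t kv_head      = 37;   // hypothetical current write position in the cache
    // same formula as the K-cache branch above:
    size_t offset = ggml_row_size(GGML_TYPE_F16, n_embd_k_gqa) * kv_head;
    printf("K cache view data pointer advances by %zu bytes at kv_head=%d\n",
           offset, (int) kv_head);
    return 0;
}
```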