From d919c6da2dfd1b22648967be7bd9d3b0fd1358dd Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 08:54:14 +0000 Subject: [PATCH 01/31] Added numa options to allow finer grained control as well as plumbing for a new mirror mode that will require numa.h --- Makefile | 4 +-- common/common.cpp | 27 ++++++++++++++++---- common/common.h | 2 +- ggml.c | 64 +++++++++++++++++++++++++++++++++++++++++++---- ggml.h | 16 ++++++++++-- llama.cpp | 10 ++++---- llama.h | 11 +++++++- 7 files changed, 113 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index ba73f063709c7..2c051068b488a 100644 --- a/Makefile +++ b/Makefile @@ -265,8 +265,8 @@ ifndef RISCV ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) # Use all CPU extensions that are available: - MK_CFLAGS += -march=native -mtune=native - HOST_CXXFLAGS += -march=native -mtune=native + MK_CFLAGS += -march=znver4 -mtune=znver4 + HOST_CXXFLAGS += -march=znver4 -mtune=znver4 # Usage AVX-only #MK_CFLAGS += -mfma -mf16c -mavx diff --git a/common/common.cpp b/common/common.cpp index 8c1a60583f276..c198706cc7840 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -666,7 +666,19 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { } else if (arg == "--no-mmap") { params.use_mmap = false; } else if (arg == "--numa") { - params.numa = true; + if (++i >= argc) { + invalid_param = true; + break; + } else { + std::string value(argv[i]); + /**/ if (value == "interleave" || value == "" ) { params.numa = LLAMA_NUMA_STRATEGY_INTERLEAVE; } + else if (value == "isolate") { params.numa = LLAMA_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = LLAMA_NUMA_STRATEGY_NUMACTL; } +#ifdef GGUF_NUMA_MIRROR + else if (value == "mirror") { params.numa = LLAMA_NUMA_STRATEGY_MIRROR; } +#endif + else { invalid_param = true; break; } + } } else if (arg == "--verbose-prompt") { params.verbose_prompt = true; } else if (arg == "--no-display-prompt") { @@ -922,7 +934,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -tb N, --threads-batch N\n"); printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n"); printf(" -td N, --threads-draft N"); - printf(" number of threads to use during generation (default: same as --threads)"); + printf(" number of threads to use during generation (default: same as --threads)\n"); printf(" -tbd N, --threads-batch-draft N\n"); printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n"); printf(" -p PROMPT, --prompt PROMPT\n"); @@ -992,7 +1004,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks); printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n"); printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks); - printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base"); + printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n"); printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft); printf(" --chunks N max number of chunks to 
process (default: %d, -1 = all)\n", params.n_chunks); @@ -1009,7 +1021,13 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { if (llama_supports_mmap()) { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } - printf(" --numa attempt optimizations that help on some NUMA systems\n"); + printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); + printf(" - interleave: (default) spread execution evenly over all nodes\n"); + printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); + printf(" - numactl: use the CPU map provided my numactl\n"); +#ifdef GGML_NUMA_MIRROR + printf(" - mirror: attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); +#endif printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); if (llama_supports_gpu_offload()) { @@ -1635,7 +1653,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false"); fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false"); fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false"); - fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false"); fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type); fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride); fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present); diff --git a/common/common.h b/common/common.h index 62de25d6a287c..9b20c6f6fed1f 100644 --- a/common/common.h +++ b/common/common.h @@ -76,6 +76,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; + int32_t numa = LLAMA_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; @@ -134,7 +135,6 @@ struct gpt_params { bool logits_all = false; // return logits for all tokens in the batch bool use_mmap = true; // use mmap for faster loads bool use_mlock = false; // use mlock to keep model in memory - bool numa = false; // attempt optimizations that help on some NUMA systems bool verbose_prompt = false; // print prompt tokens before generation bool display_prompt = true; // print prompt before generation bool infill = false; // use infill mode diff --git a/ggml.c b/ggml.c index b9ec0c981b630..c9a5f4b8890aa 100644 --- a/ggml.c +++ b/ggml.c @@ -24,6 +24,10 @@ #include #include +#ifdef GGML_NUMA_MIRROR +#include +#endif + #ifdef GGML_USE_METAL #include #endif @@ -1912,9 +1916,12 @@ struct ggml_numa_node { }; struct ggml_numa_nodes { + uint32_t numa_strategy; struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system + uint32_t current_node; // node on which main process is execting + cpu_set_t cpuset; // cpuset from numactl }; // @@ -1948,7 +1955,7 @@ inline static void ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } -void ggml_numa_init(void) { +void ggml_numa_init(uint32_t numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); @@ -1960,6 +1967,13 @@ void 
ggml_numa_init(void) { char path[256]; int rv; + // set numa scheme + g_state.numa.numa_strategy = numa_flag; + + GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy); + + g_state.numa.cpuset = ggml_get_numa_affinity(); + // enumerate nodes while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes); @@ -1978,11 +1992,17 @@ void ggml_numa_init(void) { GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); - if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) { + // figure out which node we're on + uint current_cpu; + int getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node); + + if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) { g_state.numa.n_nodes = 0; return; } + GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu); + for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { struct ggml_numa_node * node = &g_state.numa.nodes[n]; GGML_PRINT_DEBUG("CPUs on node %u:", n); @@ -2013,6 +2033,15 @@ void ggml_numa_init(void) { #endif } +cpu_set_t ggml_get_numa_affinity(void) { + cpu_set_t cpuset; + pthread_t thread; + thread = pthread_self(); + CPU_ZERO(&cpuset); + int ret = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); + return cpuset; +} + bool ggml_is_numa(void) { return g_state.numa.n_nodes > 1; } @@ -16587,11 +16616,36 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { return; } - // run thread on node_num thread_n / (threads per node) - const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); - struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; + int node_num; size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); + switch(g_state.numa.numa_strategy) { + case GGML_NUMA_STRATEGY_INTERLEAVE: + // run thread on node_num thread_n / (threads per node) + node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); + break; + case GGML_NUMA_STRATEGY_ISOLATE: + // run thread on current_node + node_num = g_state.numa.current_node; + break; + case GGML_NUMA_STRATEGY_NUMACTL: + // use the cpuset that numactl gave us + int rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); + if (rv) { + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", + strerror(rv)); + } + return; +#ifdef GGML_NUMA_MIRROR + case GGML_NUMA_STRATEGY_MIRROR: + printf("Mirror Mode Enabled"); +#endif + default: + return; + } + + struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; + cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); CPU_ZERO_S(setsize, cpus); for (size_t i = 0; i < node->n_cpus; ++i) { diff --git a/ggml.h b/ggml.h index e0a4799f3bd0a..44c45d4ef6bda 100644 --- a/ggml.h +++ b/ggml.h @@ -217,6 +217,7 @@ #include #include #include +#include #define GGML_FILE_MAGIC 0x67676d6c // "ggml" #define GGML_FILE_VERSION 1 @@ -647,6 +648,16 @@ extern "C" { void * wdata; }; + // numa strategies + enum ggml_numa_strategies { + GGML_NUMA_STRATEGY_DISABLED = 0, + GGML_NUMA_STRATEGY_INTERLEAVE = 1, + GGML_NUMA_STRATEGY_ISOLATE = 2, + GGML_NUMA_STRATEGY_NUMACTL = 3, + GGML_NUMA_STRATEGY_MIRROR = 4, + GGML_NUMA_STRATEGY_MAX_VALUE = GGML_NUMA_STRATEGY_MIRROR, + }; + // misc GGML_API void ggml_time_init(void); // call this once at the beginning of the program @@ -657,8 +668,9 @@ extern "C" { GGML_API void ggml_print_backtrace(void); - GGML_API void 
ggml_numa_init(void); // call once for better performance on NUMA systems - GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node + GGML_API void ggml_numa_init(uint32_t numa); // call once for better performance on NUMA systems + GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node + GGML_API cpu_set_t ggml_get_numa_affinity(void); // get cpuset from numactl GGML_API void ggml_print_object (const struct ggml_object * obj); GGML_API void ggml_print_objects(const struct ggml_context * ctx); diff --git a/llama.cpp b/llama.cpp index 65e399adca60e..4358aae43ac3f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -949,7 +949,7 @@ struct llama_mmap { int fd = fileno(file->fp); int flags = MAP_SHARED; // prefetch/readahead impairs performance on NUMA systems - if (numa) { prefetch = 0; } + if (numa > 0) { prefetch = 0; } #ifdef __linux__ // advise the kernel to read the file sequentially (increases readahead) if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { @@ -970,7 +970,7 @@ struct llama_mmap { strerror(errno)); } } - if (numa) { + if (numa > 0) { // advise the kernel not to use readahead // (because the next page might not belong on the same node) if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) { @@ -10327,7 +10327,7 @@ bool llama_mlock_supported(void) { return llama_supports_mlock(); } -void llama_backend_init(bool numa) { +void llama_backend_init(uint32_t numa) { ggml_time_init(); // needed to initialize f16 tables @@ -10337,8 +10337,8 @@ void llama_backend_init(bool numa) { ggml_free(ctx); } - if (numa) { - ggml_numa_init(); + if (numa > 0) { + ggml_numa_init(numa); } #ifdef GGML_USE_MPI diff --git a/llama.h b/llama.h index cec4158bc8e80..378730b423048 100644 --- a/llama.h +++ b/llama.h @@ -111,6 +111,15 @@ extern "C" { LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, }; + enum llama_numa_strategies { + LLAMA_NUMA_STRATEGY_DISABLED = 0, + LLAMA_NUMA_STRATEGY_INTERLEAVE = 1, + LLAMA_NUMA_STRATEGY_ISOLATE = 2, + LLAMA_NUMA_STRATEGY_NUMACTL = 3, + LLAMA_NUMA_STRATEGY_MIRROR = 4, + LLAMA_NUMA_STRATEGY_MAX_VALUE = LLAMA_NUMA_STRATEGY_MIRROR, + }; + enum llama_split_mode { LLAMA_SPLIT_NONE = 0, // single GPU LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs @@ -304,7 +313,7 @@ extern "C" { // Initialize the llama + ggml backend // If numa is true, use NUMA optimizations // Call once at the start of the program - LLAMA_API void llama_backend_init(bool numa); + LLAMA_API void llama_backend_init(uint32_t numa); // Call once at the end of the program - currently only used for MPI LLAMA_API void llama_backend_free(void); From 65792fa4079a40a3e47bfab1794de3656b45fc32 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 09:08:57 +0000 Subject: [PATCH 02/31] Reverted Makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 2c051068b488a..ba73f063709c7 100644 --- a/Makefile +++ b/Makefile @@ -265,8 +265,8 @@ ifndef RISCV ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) # Use all CPU extensions that are available: - MK_CFLAGS += -march=znver4 -mtune=znver4 - HOST_CXXFLAGS += -march=znver4 -mtune=znver4 + MK_CFLAGS += -march=native -mtune=native + HOST_CXXFLAGS += -march=native -mtune=native # Usage AVX-only #MK_CFLAGS += -mfma -mf16c -mavx From 592e4519bb50f66cb749c9177d5f803c8158b40a Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 09:10:55 +0000 Subject: [PATCH 03/31] Fixed include --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/ggml.c b/ggml.c index c9a5f4b8890aa..6ded00b736888 100644 --- a/ggml.c +++ b/ggml.c @@ -25,7 +25,7 @@ #include #ifdef GGML_NUMA_MIRROR -#include +#include #endif #ifdef GGML_USE_METAL From a69d6e2b91b98725f0c0310578f2f3adffa23e75 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 22:23:34 +0000 Subject: [PATCH 04/31] Removed sched.h from ggml.h, moved ggml_get_numa_affinity into ggml.c, removed trailing whitespace and fixed up a few inconsistent variables --- common/common.cpp | 14 -------------- ggml.c | 29 +++++++++++++---------------- ggml.h | 2 -- 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index c198706cc7840..efbdd00e2048f 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -399,18 +399,6 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } sparams.penalty_present = std::stof(argv[i]); - } else if (arg == "--dynatemp-range") { - if (++i >= argc) { - invalid_param = true; - break; - } - sparams.dynatemp_range = std::stof(argv[i]); - } else if (arg == "--dynatemp-exp") { - if (++i >= argc) { - invalid_param = true; - break; - } - sparams.dynatemp_exponent = std::stof(argv[i]); } else if (arg == "--mirostat") { if (++i >= argc) { invalid_param = true; @@ -966,8 +954,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --repeat-penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)sparams.penalty_repeat); printf(" --presence-penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_present); printf(" --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_freq); - printf(" --dynatemp-range N dynamic temperature range (default: %.1f, 0.0 = disabled)\n", (double)sparams.dynatemp_range); - printf(" --dynatemp-exp N dynamic temperature exponent (default: %.1f)\n", (double)sparams.dynatemp_exponent); printf(" --mirostat N use Mirostat sampling.\n"); printf(" Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"); printf(" (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", sparams.mirostat); diff --git a/ggml.c b/ggml.c index 6ded00b736888..4aaf193c2bda2 100644 --- a/ggml.c +++ b/ggml.c @@ -25,7 +25,7 @@ #include #ifdef GGML_NUMA_MIRROR -#include +#include #endif #ifdef GGML_USE_METAL @@ -1955,6 +1955,8 @@ inline static void ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } +cpu_set_t ggml_get_numa_affinity(void); // get cpuset from numactl + void ggml_numa_init(uint32_t numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); @@ -2038,7 +2040,7 @@ cpu_set_t ggml_get_numa_affinity(void) { pthread_t thread; thread = pthread_self(); CPU_ZERO(&cpuset); - int ret = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); + pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); return cpuset; } @@ -2499,8 +2501,7 @@ size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) { size_t max_size = 0; for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) { - size_t bytes = ggml_nbytes(tensor); - max_size = MAX(max_size, bytes); + max_size = MAX(max_size, ggml_nbytes(tensor)); } return max_size; @@ -11917,10 +11918,8 @@ GGML_CALL void ggml_rope_yarn_corr_dims( int n_dims, int n_orig_ctx, float freq_base, float 
beta_fast, float beta_slow, float dims[2] ) { // start and end correction dims - float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)); - float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)); - dims[0] = MAX(0, start); - dims[1] = MIN(n_dims - 1, end); + dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base))); + dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base))); } static void ggml_compute_forward_rope_f32( @@ -16617,6 +16616,7 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { } int node_num; + int rv; size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); switch(g_state.numa.numa_strategy) { @@ -16630,10 +16630,9 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { break; case GGML_NUMA_STRATEGY_NUMACTL: // use the cpuset that numactl gave us - int rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); + rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", - strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } return; #ifdef GGML_NUMA_MIRROR @@ -16652,10 +16651,9 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { CPU_SET_S(node->cpus[i], setsize, cpus); } - int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); + rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", - strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } CPU_FREE(cpus); @@ -16676,8 +16674,7 @@ static void clear_numa_thread_affinity(void) { int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", - strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } CPU_FREE(cpus); diff --git a/ggml.h b/ggml.h index 44c45d4ef6bda..3e3bb0e214364 100644 --- a/ggml.h +++ b/ggml.h @@ -217,7 +217,6 @@ #include #include #include -#include #define GGML_FILE_MAGIC 0x67676d6c // "ggml" #define GGML_FILE_VERSION 1 @@ -670,7 +669,6 @@ extern "C" { GGML_API void ggml_numa_init(uint32_t numa); // call once for better performance on NUMA systems GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node - GGML_API cpu_set_t ggml_get_numa_affinity(void); // get cpuset from numactl GGML_API void ggml_print_object (const struct ggml_object * obj); GGML_API void ggml_print_objects(const struct ggml_context * ctx); From 60b80b0e8a899763c94edd418bb042b2ae287486 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 22:27:38 +0000 Subject: [PATCH 05/31] removed trailing whitespace --- common/common.cpp | 2 +- common/common.h | 2 +- ggml.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index efbdd00e2048f..55fec92118070 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -655,7 +655,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { params.use_mmap = false; } else if (arg == "--numa") { if (++i >= argc) { - invalid_param = true; + invalid_param = true; break; } else { std::string value(argv[i]); diff --git a/common/common.h b/common/common.h index 9b20c6f6fed1f..2c864c04cbb20 100644 --- 
a/common/common.h +++ b/common/common.h @@ -76,7 +76,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; - int32_t numa = LLAMA_NUMA_STRATEGY_DISABLED; + int32_t numa = LLAMA_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; diff --git a/ggml.c b/ggml.c index 4aaf193c2bda2..6922934e33a0c 100644 --- a/ggml.c +++ b/ggml.c @@ -16630,7 +16630,7 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { break; case GGML_NUMA_STRATEGY_NUMACTL: // use the cpuset that numactl gave us - rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); + rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); if (rv) { fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } From 7aa974de5e7f14405ee33d839374e9657b52db4d Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 22:43:13 +0000 Subject: [PATCH 06/31] Added numa options to allow finer grained control as well as plumbing for a new mirror mode that will require numa.h --- Makefile | 4 +-- common/common.cpp | 27 +++++++++++++---- common/common.h | 2 +- ggml.c | 74 ++++++++++++++++++++++++++++++++++++++++------- ggml.h | 14 +++++++-- llama.cpp | 10 +++---- llama.h | 11 ++++++- 7 files changed, 116 insertions(+), 26 deletions(-) diff --git a/Makefile b/Makefile index ba73f063709c7..2c051068b488a 100644 --- a/Makefile +++ b/Makefile @@ -265,8 +265,8 @@ ifndef RISCV ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) # Use all CPU extensions that are available: - MK_CFLAGS += -march=native -mtune=native - HOST_CXXFLAGS += -march=native -mtune=native + MK_CFLAGS += -march=znver4 -mtune=znver4 + HOST_CXXFLAGS += -march=znver4 -mtune=znver4 # Usage AVX-only #MK_CFLAGS += -mfma -mf16c -mavx diff --git a/common/common.cpp b/common/common.cpp index 8c1a60583f276..c198706cc7840 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -666,7 +666,19 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { } else if (arg == "--no-mmap") { params.use_mmap = false; } else if (arg == "--numa") { - params.numa = true; + if (++i >= argc) { + invalid_param = true; + break; + } else { + std::string value(argv[i]); + /**/ if (value == "interleave" || value == "" ) { params.numa = LLAMA_NUMA_STRATEGY_INTERLEAVE; } + else if (value == "isolate") { params.numa = LLAMA_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = LLAMA_NUMA_STRATEGY_NUMACTL; } +#ifdef GGUF_NUMA_MIRROR + else if (value == "mirror") { params.numa = LLAMA_NUMA_STRATEGY_MIRROR; } +#endif + else { invalid_param = true; break; } + } } else if (arg == "--verbose-prompt") { params.verbose_prompt = true; } else if (arg == "--no-display-prompt") { @@ -922,7 +934,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -tb N, --threads-batch N\n"); printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n"); printf(" -td N, --threads-draft N"); - printf(" number of threads to use during generation (default: same as --threads)"); + printf(" number of threads to use during generation (default: same as --threads)\n"); printf(" -tbd N, --threads-batch-draft N\n"); printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n"); printf(" -p PROMPT, --prompt PROMPT\n"); @@ -992,7 +1004,7 @@ void 
gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks); printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n"); printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks); - printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base"); + printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n"); printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft); printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks); @@ -1009,7 +1021,13 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { if (llama_supports_mmap()) { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } - printf(" --numa attempt optimizations that help on some NUMA systems\n"); + printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); + printf(" - interleave: (default) spread execution evenly over all nodes\n"); + printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); + printf(" - numactl: use the CPU map provided my numactl\n"); +#ifdef GGML_NUMA_MIRROR + printf(" - mirror: attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); +#endif printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); if (llama_supports_gpu_offload()) { @@ -1635,7 +1653,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false"); fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false"); fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false"); - fprintf(stream, "numa: %s # default: false\n", params.numa ? 
"true" : "false"); fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type); fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride); fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present); diff --git a/common/common.h b/common/common.h index 62de25d6a287c..2c864c04cbb20 100644 --- a/common/common.h +++ b/common/common.h @@ -76,6 +76,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; + int32_t numa = LLAMA_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; @@ -134,7 +135,6 @@ struct gpt_params { bool logits_all = false; // return logits for all tokens in the batch bool use_mmap = true; // use mmap for faster loads bool use_mlock = false; // use mlock to keep model in memory - bool numa = false; // attempt optimizations that help on some NUMA systems bool verbose_prompt = false; // print prompt tokens before generation bool display_prompt = true; // print prompt before generation bool infill = false; // use infill mode diff --git a/ggml.c b/ggml.c index b9ec0c981b630..4475d035977c1 100644 --- a/ggml.c +++ b/ggml.c @@ -24,6 +24,10 @@ #include #include +#ifdef GGML_NUMA_MIRROR +#include +#endif + #ifdef GGML_USE_METAL #include #endif @@ -1912,9 +1916,12 @@ struct ggml_numa_node { }; struct ggml_numa_nodes { + uint32_t numa_strategy; struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system + uint32_t current_node; // node on which main process is execting + cpu_set_t cpuset; // cpuset from numactl }; // @@ -1948,7 +1955,9 @@ inline static void ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } -void ggml_numa_init(void) { +cpu_set_t ggml_get_numa_affinity(void); // get cpuset from numactl + +void ggml_numa_init(uint32_t numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); @@ -1960,6 +1969,13 @@ void ggml_numa_init(void) { char path[256]; int rv; + // set numa scheme + g_state.numa.numa_strategy = numa_flag; + + GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy); + + g_state.numa.cpuset = ggml_get_numa_affinity(); + // enumerate nodes while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) { rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes); @@ -1978,11 +1994,17 @@ void ggml_numa_init(void) { GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); - if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) { + // figure out which node we're on + uint current_cpu; + int getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node); + + if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) { g_state.numa.n_nodes = 0; return; } + GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu); + for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { struct ggml_numa_node * node = &g_state.numa.nodes[n]; GGML_PRINT_DEBUG("CPUs on node %u:", n); @@ -2013,6 +2035,15 @@ void ggml_numa_init(void) { #endif } +cpu_set_t ggml_get_numa_affinity(void) { + cpu_set_t cpuset; + pthread_t thread; + thread = pthread_self(); + CPU_ZERO(&cpuset); + pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); + return cpuset; +} + bool ggml_is_numa(void) { return 
g_state.numa.n_nodes > 1; } @@ -16587,21 +16618,45 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { return; } - // run thread on node_num thread_n / (threads per node) - const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); - struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; + int node_num; + int rv; size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); + switch(g_state.numa.numa_strategy) { + case GGML_NUMA_STRATEGY_INTERLEAVE: + // run thread on node_num thread_n / (threads per node) + node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); + break; + case GGML_NUMA_STRATEGY_ISOLATE: + // run thread on current_node + node_num = g_state.numa.current_node; + break; + case GGML_NUMA_STRATEGY_NUMACTL: + // use the cpuset that numactl gave us + rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); + if (rv) { + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); + } + return; +#ifdef GGML_NUMA_MIRROR + case GGML_NUMA_STRATEGY_MIRROR: + printf("Mirror Mode Enabled"); +#endif + default: + return; + } + + struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; + cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); CPU_ZERO_S(setsize, cpus); for (size_t i = 0; i < node->n_cpus; ++i) { CPU_SET_S(node->cpus[i], setsize, cpus); } - int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); + rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", - strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } CPU_FREE(cpus); @@ -16622,8 +16677,7 @@ static void clear_numa_thread_affinity(void) { int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", - strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } CPU_FREE(cpus); diff --git a/ggml.h b/ggml.h index e0a4799f3bd0a..3e3bb0e214364 100644 --- a/ggml.h +++ b/ggml.h @@ -647,6 +647,16 @@ extern "C" { void * wdata; }; + // numa strategies + enum ggml_numa_strategies { + GGML_NUMA_STRATEGY_DISABLED = 0, + GGML_NUMA_STRATEGY_INTERLEAVE = 1, + GGML_NUMA_STRATEGY_ISOLATE = 2, + GGML_NUMA_STRATEGY_NUMACTL = 3, + GGML_NUMA_STRATEGY_MIRROR = 4, + GGML_NUMA_STRATEGY_MAX_VALUE = GGML_NUMA_STRATEGY_MIRROR, + }; + // misc GGML_API void ggml_time_init(void); // call this once at the beginning of the program @@ -657,8 +667,8 @@ extern "C" { GGML_API void ggml_print_backtrace(void); - GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems - GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node + GGML_API void ggml_numa_init(uint32_t numa); // call once for better performance on NUMA systems + GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node GGML_API void ggml_print_object (const struct ggml_object * obj); GGML_API void ggml_print_objects(const struct ggml_context * ctx); diff --git a/llama.cpp b/llama.cpp index 65e399adca60e..4358aae43ac3f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -949,7 +949,7 @@ struct llama_mmap { int fd = fileno(file->fp); int flags = MAP_SHARED; // prefetch/readahead impairs performance on NUMA systems - if (numa) { prefetch = 0; } + if (numa > 0) { prefetch = 0; } #ifdef __linux__ // advise the kernel to read the file 
sequentially (increases readahead) if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { @@ -970,7 +970,7 @@ struct llama_mmap { strerror(errno)); } } - if (numa) { + if (numa > 0) { // advise the kernel not to use readahead // (because the next page might not belong on the same node) if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) { @@ -10327,7 +10327,7 @@ bool llama_mlock_supported(void) { return llama_supports_mlock(); } -void llama_backend_init(bool numa) { +void llama_backend_init(uint32_t numa) { ggml_time_init(); // needed to initialize f16 tables @@ -10337,8 +10337,8 @@ void llama_backend_init(bool numa) { ggml_free(ctx); } - if (numa) { - ggml_numa_init(); + if (numa > 0) { + ggml_numa_init(numa); } #ifdef GGML_USE_MPI diff --git a/llama.h b/llama.h index cec4158bc8e80..378730b423048 100644 --- a/llama.h +++ b/llama.h @@ -111,6 +111,15 @@ extern "C" { LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, }; + enum llama_numa_strategies { + LLAMA_NUMA_STRATEGY_DISABLED = 0, + LLAMA_NUMA_STRATEGY_INTERLEAVE = 1, + LLAMA_NUMA_STRATEGY_ISOLATE = 2, + LLAMA_NUMA_STRATEGY_NUMACTL = 3, + LLAMA_NUMA_STRATEGY_MIRROR = 4, + LLAMA_NUMA_STRATEGY_MAX_VALUE = LLAMA_NUMA_STRATEGY_MIRROR, + }; + enum llama_split_mode { LLAMA_SPLIT_NONE = 0, // single GPU LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs @@ -304,7 +313,7 @@ extern "C" { // Initialize the llama + ggml backend // If numa is true, use NUMA optimizations // Call once at the start of the program - LLAMA_API void llama_backend_init(bool numa); + LLAMA_API void llama_backend_init(uint32_t numa); // Call once at the end of the program - currently only used for MPI LLAMA_API void llama_backend_free(void); From 12789eb308e1a22395cb070f9eb77cab4a6dabfb Mon Sep 17 00:00:00 2001 From: root Date: Tue, 6 Feb 2024 22:45:21 +0000 Subject: [PATCH 07/31] Reverting Makefile --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 2c051068b488a..ba73f063709c7 100644 --- a/Makefile +++ b/Makefile @@ -265,8 +265,8 @@ ifndef RISCV ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) # Use all CPU extensions that are available: - MK_CFLAGS += -march=znver4 -mtune=znver4 - HOST_CXXFLAGS += -march=znver4 -mtune=znver4 + MK_CFLAGS += -march=native -mtune=native + HOST_CXXFLAGS += -march=native -mtune=native # Usage AVX-only #MK_CFLAGS += -mfma -mf16c -mavx From c43808c6250c807098629323fab29d2307176e10 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 7 Feb 2024 19:49:07 +0000 Subject: [PATCH 08/31] Fixed a number of issues with the move from BOOL to ggml_numa_strategies. 
Added a note about mirror mode note being implemented yet --- common/common.cpp | 10 +++---- common/common.h | 2 +- examples/llama-bench/llama-bench.cpp | 2 +- .../app/src/main/cpp/llama-android.cpp | 2 +- examples/quantize/quantize.cpp | 2 +- examples/server/server.cpp | 26 +++++++++++++++++-- examples/tokenize/tokenize.cpp | 2 +- ggml.c | 2 +- llama.cpp | 2 +- llama.h | 11 +------- 10 files changed, 37 insertions(+), 24 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 55fec92118070..0f5fc11a757ec 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -659,11 +659,11 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } else { std::string value(argv[i]); - /**/ if (value == "interleave" || value == "" ) { params.numa = LLAMA_NUMA_STRATEGY_INTERLEAVE; } - else if (value == "isolate") { params.numa = LLAMA_NUMA_STRATEGY_ISOLATE; } - else if (value == "numactl") { params.numa = LLAMA_NUMA_STRATEGY_NUMACTL; } + /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } + else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } #ifdef GGUF_NUMA_MIRROR - else if (value == "mirror") { params.numa = LLAMA_NUMA_STRATEGY_MIRROR; } + else if (value == "mirror") { params.numa = GGML_NUMA_STRATEGY_MIRROR; } #endif else { invalid_param = true; break; } } @@ -1012,7 +1012,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); printf(" - numactl: use the CPU map provided my numactl\n"); #ifdef GGML_NUMA_MIRROR - printf(" - mirror: attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); + printf(" - mirror: NOT YET IMPLEMENTED - attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); #endif printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); diff --git a/common/common.h b/common/common.h index 2c864c04cbb20..58500d920760b 100644 --- a/common/common.h +++ b/common/common.h @@ -76,7 +76,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; - int32_t numa = LLAMA_NUMA_STRATEGY_DISABLED; + ggml_numa_strategies numa = GGML_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index ddb0ba064b0eb..2a4728612cb31 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -1151,7 +1151,7 @@ int main(int argc, char ** argv) { if (!params.verbose) { llama_log_set(llama_null_log_callback, NULL); } - bool numa = false; + enum ggml_numa_strategies numa = GGML_NUMA_STRATEGY_DISABLED; llama_backend_init(numa); // initialize printer diff --git a/examples/llama.android/app/src/main/cpp/llama-android.cpp b/examples/llama.android/app/src/main/cpp/llama-android.cpp index d5e705dce6ca0..e2c2dc8367cf4 100644 --- a/examples/llama.android/app/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/app/src/main/cpp/llama-android.cpp @@ -274,7 +274,7 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint 
emb extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject, jboolean numa) { +Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject, jint32 numa) { llama_backend_init(numa); } diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 85f403ffc9599..5f1e3e71bce1a 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -237,7 +237,7 @@ int main(int argc, char ** argv) { params.imatrix = &imatrix_data; } - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); // parse command line arguments const std::string fname_inp = argv[arg_idx]; diff --git a/examples/server/server.cpp b/examples/server/server.cpp index fc7e723a13573..3f3295ee464c9 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1821,7 +1821,13 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } - printf(" --numa attempt optimizations that help on some NUMA systems\n"); + printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); + printf(" - interleave: (default) spread execution evenly over all nodes\n"); + printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); + printf(" - numactl: use the CPU map provided my numactl\n"); +#ifdef GGML_NUMA_MIRROR + printf(" - mirror: NOT YET IMPLEMENTED - attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); +#endif if (llama_supports_gpu_offload()) { printf(" -ngl N, --n-gpu-layers N\n"); printf(" number of layers to store in VRAM\n"); @@ -2228,9 +2234,25 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.use_mmap = false; } + else if (arg == "--numa") { + if (++i >= argc) { + invalid_param = true; + break; + } else { + std::string value(argv[i]); + /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } + else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } +#ifdef GGUF_NUMA_MIRROR + else if (value == "mirror") { params.numa = GGML_NUMA_STRATEGY_MIRROR; } +#endif + else { invalid_param = true; break; } + } + } + else if (arg == "--numa") { - params.numa = true; + params.numa = GGML_NUMA_STRATEGY_DISABLED; } else if (arg == "--embedding") { diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp index 4ff8e3fa72749..9fdcfc9dc59d7 100644 --- a/examples/tokenize/tokenize.cpp +++ b/examples/tokenize/tokenize.cpp @@ -17,7 +17,7 @@ int main(int argc, char ** argv) { const bool printing_ids = argc > 3 && std::string(argv[3]) == "--ids"; - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); llama_model_params model_params = llama_model_default_params(); model_params.vocab_only = true; diff --git a/ggml.c b/ggml.c index 6922934e33a0c..48e156a5bc2b3 100644 --- a/ggml.c +++ b/ggml.c @@ -25,7 +25,7 @@ #include #ifdef GGML_NUMA_MIRROR -#include +#include #endif #ifdef GGML_USE_METAL diff --git a/llama.cpp b/llama.cpp index 4358aae43ac3f..e3dc329e3014f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -10327,7 +10327,7 @@ bool llama_mlock_supported(void) { return llama_supports_mlock(); } -void llama_backend_init(uint32_t numa) { +void llama_backend_init(enum ggml_numa_strategies numa) { ggml_time_init(); // needed to initialize 
f16 tables diff --git a/llama.h b/llama.h index 378730b423048..70b8f5d685252 100644 --- a/llama.h +++ b/llama.h @@ -111,15 +111,6 @@ extern "C" { LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, }; - enum llama_numa_strategies { - LLAMA_NUMA_STRATEGY_DISABLED = 0, - LLAMA_NUMA_STRATEGY_INTERLEAVE = 1, - LLAMA_NUMA_STRATEGY_ISOLATE = 2, - LLAMA_NUMA_STRATEGY_NUMACTL = 3, - LLAMA_NUMA_STRATEGY_MIRROR = 4, - LLAMA_NUMA_STRATEGY_MAX_VALUE = LLAMA_NUMA_STRATEGY_MIRROR, - }; - enum llama_split_mode { LLAMA_SPLIT_NONE = 0, // single GPU LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs @@ -313,7 +304,7 @@ extern "C" { // Initialize the llama + ggml backend // If numa is true, use NUMA optimizations // Call once at the start of the program - LLAMA_API void llama_backend_init(uint32_t numa); + LLAMA_API void llama_backend_init(enum ggml_numa_strategies numa); // Call once at the end of the program - currently only used for MPI LLAMA_API void llama_backend_free(void); From 61c37ba93cf56ef5020c4008ce8b8d483938085f Mon Sep 17 00:00:00 2001 From: root Date: Wed, 7 Feb 2024 21:46:19 +0000 Subject: [PATCH 09/31] Removing MIRROR_MODE code for this PR --- common/common.cpp | 6 ------ examples/server/server.cpp | 3 --- ggml.c | 8 -------- 3 files changed, 17 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 0f5fc11a757ec..90cbe94a3ced3 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -662,9 +662,6 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } -#ifdef GGUF_NUMA_MIRROR - else if (value == "mirror") { params.numa = GGML_NUMA_STRATEGY_MIRROR; } -#endif else { invalid_param = true; break; } } } else if (arg == "--verbose-prompt") { @@ -1011,9 +1008,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" - interleave: (default) spread execution evenly over all nodes\n"); printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); printf(" - numactl: use the CPU map provided my numactl\n"); -#ifdef GGML_NUMA_MIRROR - printf(" - mirror: NOT YET IMPLEMENTED - attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); -#endif printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); if (llama_supports_gpu_offload()) { diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 0c9230d0f2039..7ed10d564cdf2 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1825,9 +1825,6 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf(" - interleave: (default) spread execution evenly over all nodes\n"); printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); printf(" - numactl: use the CPU map provided my numactl\n"); -#ifdef GGML_NUMA_MIRROR - printf(" - mirror: NOT YET IMPLEMENTED - attempt to mirror GGUF data buffer on each node's local memory to increase throughput.\n"); -#endif if (llama_supports_gpu_offload()) { printf(" -ngl N, --n-gpu-layers N\n"); printf(" number of layers to store in VRAM\n"); diff --git a/ggml.c b/ggml.c index 48e156a5bc2b3..3d8f203896f8e 100644 --- a/ggml.c +++ 
b/ggml.c @@ -24,10 +24,6 @@ #include #include -#ifdef GGML_NUMA_MIRROR -#include -#endif - #ifdef GGML_USE_METAL #include #endif @@ -16635,10 +16631,6 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); } return; -#ifdef GGML_NUMA_MIRROR - case GGML_NUMA_STRATEGY_MIRROR: - printf("Mirror Mode Enabled"); -#endif default: return; } From d47f232fc1a436b90068eda6aacc1c79f687d342 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 7 Feb 2024 22:02:21 +0000 Subject: [PATCH 10/31] Removing last bit of MIRROR_MODE code for this PR --- examples/server/server.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 7ed10d564cdf2..17eca9f679fca 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2240,9 +2240,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } -#ifdef GGUF_NUMA_MIRROR - else if (value == "mirror") { params.numa = GGML_NUMA_STRATEGY_MIRROR; } -#endif else { invalid_param = true; break; } } } From 783b7ca02da377a04b8ea0a15c96134704726201 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 7 Feb 2024 22:28:29 +0000 Subject: [PATCH 11/31] Removing unneeded branch in server.cpp example and moving get_numa_affinity and making it static --- examples/server/server.cpp | 5 ----- ggml.c | 18 ++++++++---------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 17eca9f679fca..0db4795bc5d2e 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2243,11 +2243,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, else { invalid_param = true; break; } } } - - else if (arg == "--numa") - { - params.numa = GGML_NUMA_STRATEGY_DISABLED; - } else if (arg == "--embedding") { params.embedding = true; diff --git a/ggml.c b/ggml.c index 3d8f203896f8e..1bc8437c0468e 100644 --- a/ggml.c +++ b/ggml.c @@ -1951,7 +1951,14 @@ inline static void ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } -cpu_set_t ggml_get_numa_affinity(void); // get cpuset from numactl +static cpu_set_t ggml_get_numa_affinity(void) { + cpu_set_t cpuset; + pthread_t thread; + thread = pthread_self(); + CPU_ZERO(&cpuset); + pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); + return cpuset; +} void ggml_numa_init(uint32_t numa_flag) { if (g_state.numa.n_nodes > 0) { @@ -2031,15 +2038,6 @@ void ggml_numa_init(uint32_t numa_flag) { #endif } -cpu_set_t ggml_get_numa_affinity(void) { - cpu_set_t cpuset; - pthread_t thread; - thread = pthread_self(); - CPU_ZERO(&cpuset); - pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); - return cpuset; -} - bool ggml_is_numa(void) { return g_state.numa.n_nodes > 1; } From 12c23b60c6625fd9f470b04aa364210363ee36f5 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 16:28:49 +0000 Subject: [PATCH 12/31] Fixed lingering init_llama_backend() bool calls in tests and examples --- examples/batched.swift/Sources/main.swift | 2 +- examples/llama.swiftui/llama.cpp.swift/LibLlama.swift | 2 +- tests/test-autorelease.cpp | 2 +- tests/test-model-load-cancel.cpp | 2 +- tests/test-tokenizer-0-falcon.cpp | 2 
+- tests/test-tokenizer-0-llama.cpp | 2 +- tests/test-tokenizer-1-bpe.cpp | 2 +- tests/test-tokenizer-1-llama.cpp | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index 4d000534900af..cdbce84351ac2 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -17,7 +17,7 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(argu let n_len: Int = 32 // init LLM -llama_backend_init(false) +llama_backend_init(GGML_NUMA_STRATEGY_DISABLED) defer { llama_backend_free() } diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index fc79fd3466b54..2470415dcd51e 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -51,7 +51,7 @@ actor LlamaContext { } static func create_context(path: String) throws -> LlamaContext { - llama_backend_init(false) + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED) var model_params = llama_model_default_params() #if targetEnvironment(simulator) diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp index 36a23c0bb790c..fef6683c47e02 100644 --- a/tests/test-autorelease.cpp +++ b/tests/test-autorelease.cpp @@ -12,7 +12,7 @@ int main(int argc, char ** argv) { auto * model_path = get_model_or_exit(argc, argv); std::thread([&model_path]() { - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); auto * model = llama_load_model_from_file(model_path, llama_model_default_params()); auto * ctx = llama_new_context_with_model(model, llama_context_default_params()); llama_free(ctx); diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp index 7ea4bbaccd8d1..69c5815fd43f6 100644 --- a/tests/test-model-load-cancel.cpp +++ b/tests/test-model-load-cancel.cpp @@ -14,7 +14,7 @@ int main(int argc, char *argv[] ) { fprintf(stderr, "using '%s'\n", model_path); fclose(file); - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); auto params = llama_model_params{}; params.use_mmap = false; params.progress_callback = [](float progress, void * ctx){ diff --git a/tests/test-tokenizer-0-falcon.cpp b/tests/test-tokenizer-0-falcon.cpp index a4e9d2b912728..50bd06557777b 100644 --- a/tests/test-tokenizer-0-falcon.cpp +++ b/tests/test-tokenizer-0-falcon.cpp @@ -61,7 +61,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); // load the vocab { diff --git a/tests/test-tokenizer-0-llama.cpp b/tests/test-tokenizer-0-llama.cpp index 39c8d188c9086..5a75779923f26 100644 --- a/tests/test-tokenizer-0-llama.cpp +++ b/tests/test-tokenizer-0-llama.cpp @@ -60,7 +60,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); // load the vocab { diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp index 386530f23f92c..ed4841198e20c 100644 --- a/tests/test-tokenizer-1-bpe.cpp +++ b/tests/test-tokenizer-1-bpe.cpp @@ -25,7 +25,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); // load the vocab { diff --git a/tests/test-tokenizer-1-llama.cpp b/tests/test-tokenizer-1-llama.cpp index 
4b58fe4954cf3..0d4f54d92e464 100644 --- a/tests/test-tokenizer-1-llama.cpp +++ b/tests/test-tokenizer-1-llama.cpp @@ -25,7 +25,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(false); + llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); // load the vocab { From b65c86394752be86caa41b3ad60797e1448b29b8 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 18:07:40 +0000 Subject: [PATCH 13/31] Remote enum llama_numa_strategies --- llama.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/llama.h b/llama.h index 429154656f76c..70b8f5d685252 100644 --- a/llama.h +++ b/llama.h @@ -111,15 +111,6 @@ extern "C" { LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, }; - enum llama_numa_strategies { - LLAMA_NUMA_STRATEGY_DISABLED = 0, - LLAMA_NUMA_STRATEGY_INTERLEAVE = 1, - LLAMA_NUMA_STRATEGY_ISOLATE = 2, - LLAMA_NUMA_STRATEGY_NUMACTL = 3, - LLAMA_NUMA_STRATEGY_MIRROR = 4, - LLAMA_NUMA_STRATEGY_MAX_VALUE = LLAMA_NUMA_STRATEGY_MIRROR, - }; - enum llama_split_mode { LLAMA_SPLIT_NONE = 0, // single GPU LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs From 7bbe511b8e9d79666cb148203c1678157160ac00 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 19:04:02 +0000 Subject: [PATCH 14/31] Revert bad merge with dynatemp flags --- common/common.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/common/common.cpp b/common/common.cpp index 49f91b83b5897..26042ff4c5c2c 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -403,6 +403,18 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } sparams.penalty_present = std::stof(argv[i]); + } else if (arg == "--dynatemp-range") { + if (++i >= argc) { + invalid_param = true; + break; + } + sparams.dynatemp_range = std::stof(argv[i]); + } else if (arg == "--dynatemp-exp") { + if (++i >= argc) { + invalid_param = true; + break; + } + sparams.dynatemp_exponent = std::stof(argv[i]); } else if (arg == "--mirostat") { if (++i >= argc) { invalid_param = true; From 314174ddc51528d69ea6300b36815163554475ba Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 19:55:47 +0000 Subject: [PATCH 15/31] add missing enum ggml_numa_strategies declaration and revert sync problem with master --- ggml.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ggml.c b/ggml.c index 3fa71f2645c15..452cec2822454 100644 --- a/ggml.c +++ b/ggml.c @@ -1912,7 +1912,7 @@ struct ggml_numa_node { }; struct ggml_numa_nodes { - uint32_t numa_strategy; + enum ggml_numa_strategies numa_strategy; struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system @@ -11912,8 +11912,10 @@ GGML_CALL void ggml_rope_yarn_corr_dims( int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] ) { // start and end correction dims - dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base))); - dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base))); + float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)); + float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)); + dims[0] = MAX(0, start); + dims[1] = MIN(n_dims - 1, end); } static void ggml_compute_forward_rope_f32( From c2c31660a5ab85cb1b3141d9ce9a23a3930bf40c Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 21:41:36 +0000 Subject: [PATCH 16/31] add missing enum 
ggml_numa_strategies declaration --- ggml.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.h b/ggml.h index 3e3bb0e214364..58b044526c937 100644 --- a/ggml.h +++ b/ggml.h @@ -667,7 +667,7 @@ extern "C" { GGML_API void ggml_print_backtrace(void); - GGML_API void ggml_numa_init(uint32_t numa); // call once for better performance on NUMA systems + GGML_API void ggml_numa_init(enum ggml_numa_strategies numa); // call once for better performance on NUMA systems GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node GGML_API void ggml_print_object (const struct ggml_object * obj); From e107c4cd546bec089d8410f8ef72bd2d274624f9 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Feb 2024 22:00:35 +0000 Subject: [PATCH 17/31] fixed ggml_init_numa variable --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index 452cec2822454..1527b4d407757 100644 --- a/ggml.c +++ b/ggml.c @@ -1960,7 +1960,7 @@ static cpu_set_t ggml_get_numa_affinity(void) { return cpuset; } -void ggml_numa_init(uint32_t numa_flag) { +void ggml_numa_init(enum ggml_numa_strategies numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); From 99a203d02f99cb37d478fcecd448470be6b5280a Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 8 Feb 2024 14:21:16 -0800 Subject: [PATCH 18/31] Update ggml.h Co-authored-by: Jared Van Bortel --- ggml.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml.h b/ggml.h index 58b044526c937..9b3393ca85c2f 100644 --- a/ggml.h +++ b/ggml.h @@ -667,8 +667,8 @@ extern "C" { GGML_API void ggml_print_backtrace(void); - GGML_API void ggml_numa_init(enum ggml_numa_strategies numa); // call once for better performance on NUMA systems - GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node + GGML_API void ggml_numa_init(enum ggml_numa_strategies numa); // call once for better performance on NUMA systems + GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node GGML_API void ggml_print_object (const struct ggml_object * obj); GGML_API void ggml_print_objects(const struct ggml_context * ctx); From 9d42825c3fd1239a51d6f7fed9f98a1b5e0aee21 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 13 Feb 2024 06:47:40 +0000 Subject: [PATCH 19/31] Update READMEs with info about numa flags, change INTERLEAVE strategy name to DISTRIBUTE everywhere, implement the improved distribution strategy from @rankaiyx, fix a spelling mistake and un-merge some bad merges --- common/common.cpp | 8 +++++--- examples/main/README.md | 6 +++++- examples/server/README.md | 7 +++++++ examples/server/server.cpp | 4 ++-- ggml.c | 7 ++++--- ggml.h | 2 +- 6 files changed, 24 insertions(+), 10 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 360470c120661..e92046539a78e 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -676,7 +676,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } else { std::string value(argv[i]); - /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } + /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { invalid_param = true; break; } @@ -976,6 +976,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const 
gpt_params & params) { printf(" --repeat-penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)sparams.penalty_repeat); printf(" --presence-penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_present); printf(" --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_freq); + printf(" --dynatemp-range N dynamic temperature range (default: %.1f, 0.0 = disabled)\n", (double)sparams.dynatemp_range); + printf(" --dynatemp-exp N dynamic temperature exponent (default: %.1f)\n", (double)sparams.dynatemp_exponent); printf(" --mirostat N use Mirostat sampling.\n"); printf(" Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"); printf(" (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", sparams.mirostat); @@ -1030,9 +1032,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); - printf(" - interleave: (default) spread execution evenly over all nodes\n"); + printf(" - distribute: spread execution evenly over all nodes\n"); printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); - printf(" - numactl: use the CPU map provided my numactl\n"); + printf(" - numactl: use the CPU map provided by numactl\n"); printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); if (llama_supports_gpu_offload()) { diff --git a/examples/main/README.md b/examples/main/README.md index c7997f66569a5..7f84e42623274 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -283,7 +283,11 @@ These options help improve the performance and memory usage of the LLaMA models. ### NUMA support -- `--numa`: Attempt optimizations that help on some systems with non-uniform memory access. This currently consists of pinning an equal proportion of the threads to the cores on each NUMA node, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root. +- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes. +- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node. +- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. 
This is the most flexible mode, and allows arbitrary core usage patterns, for example a map that uses all the cores on one NUMA node, and just enough cores on a second node to saturate the inter-node memory bus. + + These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root. ### Memory Float 32 diff --git a/examples/server/README.md b/examples/server/README.md index 0f7373ae86204..8e141d22d1716 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -16,6 +16,13 @@ Command line options: - `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended. - `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped. - `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed. +- `--numa STRATEGY`: Attempt one of the below optimization strategies that help on some NUMA systems +- `--numa distribute`: Spread execution evenly over all nodes +- `--numa isolate`: Only spawn threads on CPUs on the node that execution started on +- `--numa numactl`: Use the CPU map provided by numactl +if run without this previously, it is recommended to drop the system page cache before using this +see https://github.com/ggerganov/llama.cpp/issues/1437 + - `--numa`: Attempt optimizations that help on some NUMA systems. - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains. - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation. 
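The three strategies documented above reduce to a per-thread node choice inside ggml's thread-affinity code. The standalone C sketch below is an illustration only and is not part of the patch series: the helper function is invented for the sketch, but the mapping mirrors the switch in ggml.c's set_numa_thread_affinity() as changed by this patch (distribute pins threads round-robin across nodes, isolate keeps them on the node the process started on, and numactl leaves the externally supplied cpuset untouched).

#include <stdio.h>

/* Illustration only: enum values as defined by this series in ggml.h;
 * numa_node_for_thread() is invented for this sketch and does not exist in the tree. */
enum ggml_numa_strategy {
    GGML_NUMA_STRATEGY_DISABLED   = 0,
    GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
    GGML_NUMA_STRATEGY_ISOLATE    = 2,
    GGML_NUMA_STRATEGY_NUMACTL    = 3,
    GGML_NUMA_STRATEGY_MIRROR     = 4,
};

/* Returns the node a worker thread should be pinned to, or -1 to leave the
 * thread's existing affinity (e.g. a numactl-provided cpuset) untouched. */
static int numa_node_for_thread(enum ggml_numa_strategy strategy,
                                int thread_n, int n_nodes, int current_node) {
    switch (strategy) {
        case GGML_NUMA_STRATEGY_DISTRIBUTE: return thread_n % n_nodes; /* round-robin over all nodes */
        case GGML_NUMA_STRATEGY_ISOLATE:    return current_node;       /* stay on the starting node  */
        default:                            return -1;                 /* other strategies: leave affinity untouched in this sketch */
    }
}

int main(void) {
    /* example: 8 worker threads on a 2-node machine using the distribute strategy */
    for (int t = 0; t < 8; t++) {
        printf("thread %d -> node %d\n", t, numa_node_for_thread(GGML_NUMA_STRATEGY_DISTRIBUTE, t, 2, 0));
    }
    return 0;
}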
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 72c54b42df5bf..0c9851e96f385 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1840,7 +1840,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); - printf(" - interleave: (default) spread execution evenly over all nodes\n"); + printf(" - distribute: spread execution evenly over all nodes\n"); printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); printf(" - numactl: use the CPU map provided my numactl\n"); if (llama_supports_gpu_offload()) { @@ -2257,7 +2257,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, break; } else { std::string value(argv[i]); - /**/ if (value == "interleave" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_INTERLEAVE; } + /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { invalid_param = true; break; } diff --git a/ggml.c b/ggml.c index 30f9bea61cf36..a635b3eccd1dc 100644 --- a/ggml.c +++ b/ggml.c @@ -2537,7 +2537,8 @@ size_t ggml_get_max_tensor_size(const struct ggml_context * ctx) { size_t max_size = 0; for (struct ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor != NULL; tensor = ggml_get_next_tensor(ctx, tensor)) { - max_size = MAX(max_size, ggml_nbytes(tensor)); + size_t bytes = ggml_nbytes(tensor); + max_size = MAX(max_size, bytes); } return max_size; @@ -16672,9 +16673,9 @@ static void set_numa_thread_affinity(int thread_n, int n_threads) { size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); switch(g_state.numa.numa_strategy) { - case GGML_NUMA_STRATEGY_INTERLEAVE: + case GGML_NUMA_STRATEGY_DISTRIBUTE: // run thread on node_num thread_n / (threads per node) - node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes); + node_num = thread_n % g_state.numa.n_nodes; break; case GGML_NUMA_STRATEGY_ISOLATE: // run thread on current_node diff --git a/ggml.h b/ggml.h index a5bb4c1ab7c47..5f59fb531167c 100644 --- a/ggml.h +++ b/ggml.h @@ -661,7 +661,7 @@ extern "C" { // numa strategies enum ggml_numa_strategies { GGML_NUMA_STRATEGY_DISABLED = 0, - GGML_NUMA_STRATEGY_INTERLEAVE = 1, + GGML_NUMA_STRATEGY_DISTRIBUTE = 1, GGML_NUMA_STRATEGY_ISOLATE = 2, GGML_NUMA_STRATEGY_NUMACTL = 3, GGML_NUMA_STRATEGY_MIRROR = 4, From 0fb40ae755ee002669db52ae2faa4fca5bc319e6 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 14 Feb 2024 09:46:06 +0000 Subject: [PATCH 20/31] split numa init out from llama_backend_init and created llama_numa_init. 
Updated all code paths and samples --- examples/batched-bench/batched-bench.cpp | 3 ++- examples/batched.swift/Sources/main.swift | 2 +- examples/batched/batched.cpp | 3 ++- examples/beam-search/beam-search.cpp | 3 ++- examples/embedding/embedding.cpp | 3 ++- examples/imatrix/imatrix.cpp | 3 ++- examples/infill/infill.cpp | 3 ++- examples/llama-bench/llama-bench.cpp | 3 +-- .../llama.android/app/src/main/cpp/llama-android.cpp | 4 ++-- .../llama.swiftui/llama.cpp.swift/LibLlama.swift | 2 +- examples/llava/llava-cli.cpp | 3 ++- examples/lookahead/lookahead.cpp | 3 ++- examples/lookup/lookup.cpp | 3 ++- examples/main/main.cpp | 3 ++- examples/parallel/parallel.cpp | 3 ++- examples/passkey/passkey.cpp | 3 ++- examples/perplexity/perplexity.cpp | 3 ++- examples/quantize/quantize.cpp | 2 +- examples/server/server.cpp | 3 ++- examples/simple/simple.cpp | 3 ++- examples/speculative/speculative.cpp | 3 ++- examples/tokenize/tokenize.cpp | 2 +- ggml.c | 6 +++--- llama.cpp | 12 +++++++----- llama.h | 5 ++++- tests/test-autorelease.cpp | 2 +- tests/test-model-load-cancel.cpp | 2 +- tests/test-tokenizer-0-falcon.cpp | 2 +- tests/test-tokenizer-0-llama.cpp | 2 +- tests/test-tokenizer-1-bpe.cpp | 2 +- tests/test-tokenizer-1-llama.cpp | 2 +- 31 files changed, 59 insertions(+), 39 deletions(-) diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index b52d684578ceb..55dfd97843895 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -82,7 +82,8 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index cdbce84351ac2..d75c503d58311 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -17,7 +17,7 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? 
Int(argu let n_len: Int = 32 // init LLM -llama_backend_init(GGML_NUMA_STRATEGY_DISABLED) +llama_backend_init() defer { llama_backend_free() } diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp index b1775e0b0e8d6..eab636692e7d1 100644 --- a/examples/batched/batched.cpp +++ b/examples/batched/batched.cpp @@ -50,7 +50,8 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model diff --git a/examples/beam-search/beam-search.cpp b/examples/beam-search/beam-search.cpp index 679b382e19b4e..866c6d7a62867 100644 --- a/examples/beam-search/beam-search.cpp +++ b/examples/beam-search/beam-search.cpp @@ -119,7 +119,8 @@ int main(int argc, char ** argv) // Init LLM : //--------------------------------- - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index b4688cf519d15..acff715e99d05 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -74,7 +74,8 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index bc9f6fa682f96..f21bc48f3b466 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -568,7 +568,8 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model_params mparams = llama_model_params_from_gpt_params(params); diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index 72fb133b4fa06..92c67b7cff5c8 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -202,7 +202,8 @@ int main(int argc, char ** argv) { std::mt19937 rng(params.seed); LOG("%s: llama backend init\n", __func__); - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 2a4728612cb31..11410f8ae7625 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -1151,8 +1151,7 @@ int main(int argc, char ** argv) { if (!params.verbose) { llama_log_set(llama_null_log_callback, NULL); } - enum ggml_numa_strategies numa = GGML_NUMA_STRATEGY_DISABLED; - llama_backend_init(numa); + llama_backend_init(); // initialize printer std::unique_ptr p; diff --git a/examples/llama.android/app/src/main/cpp/llama-android.cpp b/examples/llama.android/app/src/main/cpp/llama-android.cpp index e2c2dc8367cf4..2beb1e0d5321d 100644 --- a/examples/llama.android/app/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/app/src/main/cpp/llama-android.cpp @@ -274,8 +274,8 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint emb extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject, jint32 numa) { - llama_backend_init(numa); +Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) { + llama_backend_init(); } extern "C" diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 
2470415dcd51e..58fcf40c6fb69 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -51,7 +51,7 @@ actor LlamaContext { } static func create_context(path: String) throws -> LlamaContext { - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED) + llama_backend_init() var model_params = llama_model_default_params() #if targetEnvironment(simulator) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 031e9806dfb67..98f0fd3b25ebb 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -196,7 +196,8 @@ static struct llava_context * llava_init(gpt_params * params) { auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - llama_backend_init(params->numa); + llama_backend_init(); + llama_numa_init(params->numa); llama_model_params model_params = llama_model_params_from_gpt_params(*params); diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index e55a15a1bf054..e2551e7a494c2 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -54,7 +54,8 @@ int main(int argc, char ** argv) { #endif // LOG_DISABLE_LOGS // init llama.cpp - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model = NULL; llama_context * ctx = NULL; diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index 18235b8a1d31d..b53fae11045b8 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -31,7 +31,8 @@ int main(int argc, char ** argv){ #endif // LOG_DISABLE_LOGS // init llama.cpp - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model = NULL; llama_context * ctx = NULL; diff --git a/examples/main/main.cpp b/examples/main/main.cpp index e8ab8cbae0c92..f5d2f48935eb6 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -185,7 +185,8 @@ int main(int argc, char ** argv) { } LOG("%s: llama backend init\n", __func__); - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index d2e074d9e12b0..7d11fcd593080 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -122,7 +122,8 @@ int main(int argc, char ** argv) { #endif // LOG_DISABLE_LOGS // init llama.cpp - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model = NULL; llama_context * ctx = NULL; diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp index 5c0022832146b..e12a1cdf19a79 100644 --- a/examples/passkey/passkey.cpp +++ b/examples/passkey/passkey.cpp @@ -71,7 +71,8 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index b2c131d4ce6dd..67d2d3293a327 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1809,7 +1809,8 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 5f1e3e71bce1a..4a5c504e31c9c 100644 --- 
a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -237,7 +237,7 @@ int main(int argc, char ** argv) { params.imatrix = &imatrix_data; } - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); // parse command line arguments const std::string fname_inp = argv[arg_idx]; diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 0c9851e96f385..923891839b29e 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2492,7 +2492,8 @@ int main(int argc, char **argv) params.model_alias = params.model; } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER}, {"commit", LLAMA_COMMIT}}); diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 9cfde8308f18f..39e2d8ea490e3 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -31,7 +31,8 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index 7b3af01f339a9..3848791d475ad 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -50,7 +50,8 @@ int main(int argc, char ** argv) { #endif // LOG_DISABLE_LOGS // init llama.cpp - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model_tgt = NULL; llama_model * model_dft = NULL; diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp index 9fdcfc9dc59d7..d95a9247525eb 100644 --- a/examples/tokenize/tokenize.cpp +++ b/examples/tokenize/tokenize.cpp @@ -17,7 +17,7 @@ int main(int argc, char ** argv) { const bool printing_ids = argc > 3 && std::string(argv[3]) == "--ids"; - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); llama_model_params model_params = llama_model_default_params(); model_params.vocab_only = true; diff --git a/ggml.c b/ggml.c index a635b3eccd1dc..c131575b9ebba 100644 --- a/ggml.c +++ b/ggml.c @@ -16663,7 +16663,7 @@ typedef pthread_t ggml_thread_t; // Android's libc implementation "bionic" does not support setting affinity #if defined(__linux__) && !defined(__BIONIC__) -static void set_numa_thread_affinity(int thread_n, int n_threads) { +static void set_numa_thread_affinity(int thread_n) { if (!ggml_is_numa()) { return; } @@ -16731,7 +16731,7 @@ static void clear_numa_thread_affinity(void) { #else // TODO: Windows etc. 
// (the linux implementation may also work on BSD, someone should test) -static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); } +static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } static void clear_numa_thread_affinity(void) {} #endif @@ -17031,7 +17031,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { const int n_threads = state->shared->n_threads; - set_numa_thread_affinity(state->ith, n_threads); + set_numa_thread_affinity(state->ith); int node_n = -1; int task_phase = GGML_TASK_FINALIZE; diff --git a/llama.cpp b/llama.cpp index 81f414d3c3e53..23817e09f4875 100644 --- a/llama.cpp +++ b/llama.cpp @@ -11156,7 +11156,7 @@ bool llama_mlock_supported(void) { return llama_supports_mlock(); } -void llama_backend_init(enum ggml_numa_strategies numa) { +void llama_backend_init(void) { ggml_time_init(); // needed to initialize f16 tables @@ -11166,15 +11166,17 @@ void llama_backend_init(enum ggml_numa_strategies numa) { ggml_free(ctx); } - if (numa > 0) { - ggml_numa_init(numa); - } - #ifdef GGML_USE_MPI ggml_mpi_backend_init(); #endif } +void llama_numa_init(enum ggml_numa_strategies numa) { + if (numa > 0) { + ggml_numa_init(numa); + } +} + void llama_backend_free(void) { #ifdef GGML_USE_MPI ggml_mpi_backend_free(); diff --git a/llama.h b/llama.h index fe9e05f1da0da..a20b1f8f17b1b 100644 --- a/llama.h +++ b/llama.h @@ -306,7 +306,10 @@ extern "C" { // Initialize the llama + ggml backend // If numa is true, use NUMA optimizations // Call once at the start of the program - LLAMA_API void llama_backend_init(enum ggml_numa_strategies numa); + LLAMA_API void llama_backend_init(void); + + //optional: + LLAMA_API void llama_numa_init(enum ggml_numa_strategies numa); // Call once at the end of the program - currently only used for MPI LLAMA_API void llama_backend_free(void); diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp index fef6683c47e02..57fa000114d5d 100644 --- a/tests/test-autorelease.cpp +++ b/tests/test-autorelease.cpp @@ -12,7 +12,7 @@ int main(int argc, char ** argv) { auto * model_path = get_model_or_exit(argc, argv); std::thread([&model_path]() { - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); auto * model = llama_load_model_from_file(model_path, llama_model_default_params()); auto * ctx = llama_new_context_with_model(model, llama_context_default_params()); llama_free(ctx); diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp index 69c5815fd43f6..858535c3c4020 100644 --- a/tests/test-model-load-cancel.cpp +++ b/tests/test-model-load-cancel.cpp @@ -14,7 +14,7 @@ int main(int argc, char *argv[] ) { fprintf(stderr, "using '%s'\n", model_path); fclose(file); - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); auto params = llama_model_params{}; params.use_mmap = false; params.progress_callback = [](float progress, void * ctx){ diff --git a/tests/test-tokenizer-0-falcon.cpp b/tests/test-tokenizer-0-falcon.cpp index 50bd06557777b..472b0b3a8f436 100644 --- a/tests/test-tokenizer-0-falcon.cpp +++ b/tests/test-tokenizer-0-falcon.cpp @@ -61,7 +61,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); // load the vocab { diff --git a/tests/test-tokenizer-0-llama.cpp b/tests/test-tokenizer-0-llama.cpp index 5a75779923f26..0a16cd7eb404b 100644 --- a/tests/test-tokenizer-0-llama.cpp +++ 
b/tests/test-tokenizer-0-llama.cpp @@ -60,7 +60,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); // load the vocab { diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp index dcbfbdd3e2298..3596ce55af2ce 100644 --- a/tests/test-tokenizer-1-bpe.cpp +++ b/tests/test-tokenizer-1-bpe.cpp @@ -25,7 +25,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); // load the vocab { diff --git a/tests/test-tokenizer-1-llama.cpp b/tests/test-tokenizer-1-llama.cpp index 9a36c9e79dd9b..9333f8686fa1c 100644 --- a/tests/test-tokenizer-1-llama.cpp +++ b/tests/test-tokenizer-1-llama.cpp @@ -25,7 +25,7 @@ int main(int argc, char **argv) { llama_model * model; llama_context * ctx; - llama_backend_init(GGML_NUMA_STRATEGY_DISABLED); + llama_backend_init(); // load the vocab { From 7fb54278137915ed9d02cdac7f75b770579aac6e Mon Sep 17 00:00:00 2001 From: root Date: Wed, 14 Feb 2024 17:14:26 +0000 Subject: [PATCH 21/31] Fix up some boolean vs enum comparisons --- llama.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llama.cpp b/llama.cpp index 23817e09f4875..9c36407673c50 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1034,7 +1034,7 @@ struct llama_mmap { int fd = fileno(file->fp); int flags = MAP_SHARED; // prefetch/readahead impairs performance on NUMA systems - if (numa > 0) { prefetch = 0; } + if (numa) { prefetch = 0; } #ifdef __linux__ // advise the kernel to read the file sequentially (increases readahead) if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { @@ -1055,7 +1055,7 @@ struct llama_mmap { strerror(errno)); } } - if (numa > 0) { + if (numa) { // advise the kernel not to use readahead // (because the next page might not belong on the same node) if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) { @@ -11172,7 +11172,7 @@ void llama_backend_init(void) { } void llama_numa_init(enum ggml_numa_strategies numa) { - if (numa > 0) { + if (numa != GGML_NUMA_STRATEGY_DISABLED) { ggml_numa_init(numa); } } From e237527feba4df390aede29125333edbbb51b694 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 14 Feb 2024 19:37:10 +0000 Subject: [PATCH 22/31] Added #ifdefs for non-Linux OS that don't have cpu_set_t datatype --- ggml.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ggml.c b/ggml.c index c131575b9ebba..1b616a41621a9 100644 --- a/ggml.c +++ b/ggml.c @@ -1959,7 +1959,11 @@ struct ggml_numa_nodes { uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system uint32_t current_node; // node on which main process is execting +#ifdef __linux__ cpu_set_t cpuset; // cpuset from numactl +#else + uint32_t cpuset; // no NUMA support outside of Linux at this time. 
Use a portable datatype +#endif }; // @@ -1993,6 +1997,7 @@ inline static void ggml_critical_section_end(void) { atomic_fetch_sub(&g_state_barrier, 1); } +#ifdef __linux__ static cpu_set_t ggml_get_numa_affinity(void) { cpu_set_t cpuset; pthread_t thread; @@ -2001,6 +2006,12 @@ static cpu_set_t ggml_get_numa_affinity(void) { pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); return cpuset; } +#else +static uint32_t ggml_get_numa_affinity(void) { + uint32_t cpuset = 0; + return cpuset; // No NUMA support outside of Linux, so return a safe datatype set to zero +} +#endif void ggml_numa_init(enum ggml_numa_strategies numa_flag) { if (g_state.numa.n_nodes > 0) { From dc828c4556d6d249cf473fb87ae6462b897e93a7 Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 07:11:28 -0800 Subject: [PATCH 23/31] Update ggml.h Align enum values Co-authored-by: Georgi Gerganov --- ggml.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ggml.h b/ggml.h index 5f59fb531167c..ad65222fa5ea3 100644 --- a/ggml.h +++ b/ggml.h @@ -659,13 +659,13 @@ extern "C" { }; // numa strategies - enum ggml_numa_strategies { - GGML_NUMA_STRATEGY_DISABLED = 0, + enum ggml_numa_strategy { + GGML_NUMA_STRATEGY_DISABLED = 0, GGML_NUMA_STRATEGY_DISTRIBUTE = 1, - GGML_NUMA_STRATEGY_ISOLATE = 2, - GGML_NUMA_STRATEGY_NUMACTL = 3, - GGML_NUMA_STRATEGY_MIRROR = 4, - GGML_NUMA_STRATEGY_MAX_VALUE = GGML_NUMA_STRATEGY_MIRROR, + GGML_NUMA_STRATEGY_ISOLATE = 2, + GGML_NUMA_STRATEGY_NUMACTL = 3, + GGML_NUMA_STRATEGY_MIRROR = 4, + GGML_NUMA_STRATEGY_COUNT }; // misc From 4ffe18ee5e6cf4b756acd0739bc58fa83193e782 Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 07:12:24 -0800 Subject: [PATCH 24/31] Update ggml.c Remove whitespace Co-authored-by: Georgi Gerganov --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index 1b616a41621a9..eb9095ceae509 100644 --- a/ggml.c +++ b/ggml.c @@ -16734,7 +16734,7 @@ static void clear_numa_thread_affinity(void) { int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); } CPU_FREE(cpus); From 1585fec58b3a0a625ce2b4d989e439f353e771b2 Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 07:13:00 -0800 Subject: [PATCH 25/31] Update ggml.c align paremeters Co-authored-by: Georgi Gerganov --- ggml.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index eb9095ceae509..d6c6b3ff561e6 100644 --- a/ggml.c +++ b/ggml.c @@ -16713,7 +16713,7 @@ static void set_numa_thread_affinity(int thread_n) { rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); if (rv) { - fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); + fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); } CPU_FREE(cpus); From c847828c89021441a5f205ebc05ad1cbc4bd2173 Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 07:13:41 -0800 Subject: [PATCH 26/31] Update examples/server/server.cpp remove whitespace and align brace Co-authored-by: Georgi Gerganov --- examples/server/server.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index c5f1ab617730c..1873dad2d15a5 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2266,9 +2266,9 @@ static void server_params_parse(int argc, char **argv, 
server_params &sparams, break; } else { std::string value(argv[i]); - /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } + /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } - else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { invalid_param = true; break; } } } From 377b58ffe0f73e1828b9138afd5ed78e7871bbea Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 07:15:05 -0800 Subject: [PATCH 27/31] Update common/common.cpp Remove whitespace and align brace Co-authored-by: Georgi Gerganov --- common/common.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index e92046539a78e..c3d4945c72979 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -676,9 +676,9 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } else { std::string value(argv[i]); - /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } + /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } - else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { invalid_param = true; break; } } } else if (arg == "--verbose-prompt") { From da652113f15e446fafd1c72144134a67dc6b1642 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 15 Feb 2024 15:35:32 +0000 Subject: [PATCH 28/31] unified ggml_numa_strategy enum and fixed text alignment in server.cpp example --- common/common.h | 2 +- examples/server/server.cpp | 8 ++++---- ggml.c | 4 ++-- ggml.h | 2 +- llama.cpp | 2 +- llama.h | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/common/common.h b/common/common.h index cf2198c8b07c9..74c1369953d48 100644 --- a/common/common.h +++ b/common/common.h @@ -76,7 +76,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; - ggml_numa_strategies numa = GGML_NUMA_STRATEGY_DISABLED; + ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 1129de203310c..912c750cc6223 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1855,10 +1855,10 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } - printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); - printf(" - distribute: spread execution evenly over all nodes\n"); - printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); - printf(" - numactl: use the CPU map provided my numactl\n"); + printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); + printf(" - distribute: spread execution evenly over all nodes\n"); + printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); + printf(" - numactl: use the CPU map provided my numactl\n"); 
if (llama_supports_gpu_offload()) { printf(" -ngl N, --n-gpu-layers N\n"); printf(" number of layers to store in VRAM\n"); diff --git a/ggml.c b/ggml.c index d6c6b3ff561e6..957fa7c501b75 100644 --- a/ggml.c +++ b/ggml.c @@ -1954,7 +1954,7 @@ struct ggml_numa_node { }; struct ggml_numa_nodes { - enum ggml_numa_strategies numa_strategy; + enum ggml_numa_strategy numa_strategy; struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; uint32_t n_nodes; uint32_t total_cpus; // hardware threads on system @@ -2013,7 +2013,7 @@ static uint32_t ggml_get_numa_affinity(void) { } #endif -void ggml_numa_init(enum ggml_numa_strategies numa_flag) { +void ggml_numa_init(enum ggml_numa_strategy numa_flag) { if (g_state.numa.n_nodes > 0) { fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); diff --git a/ggml.h b/ggml.h index ad65222fa5ea3..270018185f397 100644 --- a/ggml.h +++ b/ggml.h @@ -678,7 +678,7 @@ extern "C" { GGML_API void ggml_print_backtrace(void); - GGML_API void ggml_numa_init(enum ggml_numa_strategies numa); // call once for better performance on NUMA systems + GGML_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node GGML_API void ggml_print_object (const struct ggml_object * obj); diff --git a/llama.cpp b/llama.cpp index 9c36407673c50..2bd59ea7254b8 100644 --- a/llama.cpp +++ b/llama.cpp @@ -11171,7 +11171,7 @@ void llama_backend_init(void) { #endif } -void llama_numa_init(enum ggml_numa_strategies numa) { +void llama_numa_init(enum ggml_numa_strategy numa) { if (numa != GGML_NUMA_STRATEGY_DISABLED) { ggml_numa_init(numa); } diff --git a/llama.h b/llama.h index a20b1f8f17b1b..e338ff56bb8ca 100644 --- a/llama.h +++ b/llama.h @@ -309,7 +309,7 @@ extern "C" { LLAMA_API void llama_backend_init(void); //optional: - LLAMA_API void llama_numa_init(enum ggml_numa_strategies numa); + LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); // Call once at the end of the program - currently only used for MPI LLAMA_API void llama_backend_free(void); From 7d1f026a58a22557be995308fdf935bd1ab810ee Mon Sep 17 00:00:00 2001 From: bmwl Date: Thu, 15 Feb 2024 09:39:32 -0800 Subject: [PATCH 29/31] Update ggml.c simplified return for platforms without NUMA support Co-authored-by: Jared Van Bortel --- ggml.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index 957fa7c501b75..4e302fb7de2f4 100644 --- a/ggml.c +++ b/ggml.c @@ -2008,8 +2008,7 @@ static cpu_set_t ggml_get_numa_affinity(void) { } #else static uint32_t ggml_get_numa_affinity(void) { - uint32_t cpuset = 0; - return cpuset; // No NUMA support outside of Linux, so return a safe datatype set to zero + return 0; // no NUMA support } #endif From a3cf7bf60f61f5927a62c8c16b817703ed9f92e5 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 15 Feb 2024 22:21:09 +0000 Subject: [PATCH 30/31] removed redundant else from cli argument processing of --numa --- common/common.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index c3d4945c72979..8e9ea5b6a38b9 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -674,13 +674,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { if (++i >= argc) { invalid_param = true; break; - } else { - std::string value(argv[i]); - /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } - else if (value == "isolate") 
{ params.numa = GGML_NUMA_STRATEGY_ISOLATE; } - else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } - else { invalid_param = true; break; } } + std::string value(argv[i]); + /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } + else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } + else { invalid_param = true; break; } } else if (arg == "--verbose-prompt") { params.verbose_prompt = true; } else if (arg == "--no-display-prompt") { From 26ea983bf1a71e8b8ffe43cb541d1c21d72676ea Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Thu, 15 Feb 2024 17:27:18 -0500 Subject: [PATCH 31/31] whitespace --- common/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/common.cpp b/common/common.cpp index 8e9ea5b6a38b9..c5e83cc2a9e40 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -676,7 +676,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } std::string value(argv[i]); - /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } + /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } else { invalid_param = true; break; }
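Read end to end, the series leaves callers with a two-step initialization: llama_backend_init() is always called once, and llama_numa_init() optionally selects a ggml_numa_strategy. The sketch below is minimal caller code against the llama.h declarations introduced by these patches; the model path is a placeholder and error handling is reduced to a null check.

/* Minimal usage sketch of the API as it stands at the end of this series.
 * "model.gguf" is a placeholder path. */
#include "llama.h"

int main(void) {
    llama_backend_init();                           /* call once at program start            */
    llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE); /* optional: any ggml_numa_strategy value */

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model != NULL) {
        struct llama_context * ctx = llama_new_context_with_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_free_model(model);
    }

    llama_backend_free();                           /* call once at program end */
    return 0;
}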