From be05b47d7d46789e966f7871cc5d288af5d493db Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 15:05:07 +0000 Subject: [PATCH 01/36] Adapt MP to cpu_executor --- Dockerfile.cpu | 4 +- cmake/cpu_extension.cmake | 4 + csrc/cpu/torch_bindings.cpp | 7 + csrc/cpu/utils.cpp | 81 ++++++++++++ vllm/envs.py | 6 + vllm/executor/cpu_executor.py | 224 +++++++++++++++++++++++++++----- vllm/worker/cpu_model_runner.py | 7 +- vllm/worker/cpu_worker.py | 8 ++ 8 files changed, 303 insertions(+), 38 deletions(-) create mode 100644 csrc/cpu/utils.cpp diff --git a/Dockerfile.cpu b/Dockerfile.cpu index f95d748f1e4be..f0a25628d487d 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -3,7 +3,7 @@ FROM ubuntu:22.04 AS cpu-test-1 RUN apt-get update -y \ - && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 \ + && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \ && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 # https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html @@ -14,6 +14,8 @@ RUN pip install intel-openmp ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so:$LD_PRELOAD" +RUN echo 'ulimit -c 0' >> ~/.bashrc + RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/cpu/intel_extension_for_pytorch-2.3.100%2Bgit0eb3473-cp310-cp310-linux_x86_64.whl RUN pip install --upgrade pip \ diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 690559ee265e9..118f9b28e0ae3 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -83,6 +83,8 @@ endif() message(STATUS "CPU extension compile flags: ${CXX_COMPILE_FLAGS}") +list(APPEND LIBS "numa") + # # Define extension targets @@ -95,6 +97,7 @@ set(VLLM_EXT_SRC "csrc/cpu/activation.cpp" "csrc/cpu/attention.cpp" "csrc/cpu/cache.cpp" + "csrc/cpu/utils.cpp" "csrc/cpu/layernorm.cpp" "csrc/cpu/pos_encoding.cpp" "csrc/cpu/torch_bindings.cpp") @@ -104,6 +107,7 @@ define_gpu_extension_target( DESTINATION vllm LANGUAGE CXX SOURCES ${VLLM_EXT_SRC} + LIBRARIES ${LIBS} COMPILE_FLAGS ${CXX_COMPILE_FLAGS} USE_SABI 3 WITH_SOABI diff --git a/csrc/cpu/torch_bindings.cpp b/csrc/cpu/torch_bindings.cpp index 5be0e9810b5b9..7d549e271a30d 100644 --- a/csrc/cpu/torch_bindings.cpp +++ b/csrc/cpu/torch_bindings.cpp @@ -4,6 +4,8 @@ #include +void init_cpu_threads_env(const std::string& cpu_ids); + TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // vLLM custom ops @@ -107,4 +109,9 @@ TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) { cache_ops.impl("reshape_and_cache", torch::kCPU, &reshape_and_cache); } +TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _utils), utils) { + // CPU utils + utils.def("init_cpu_threads_env(str cpu_ids) -> ()", &init_cpu_threads_env); +} + REGISTER_EXTENSION(TORCH_EXTENSION_NAME) diff --git a/csrc/cpu/utils.cpp b/csrc/cpu/utils.cpp new file mode 100644 index 0000000000000..a9d204a15565d --- /dev/null +++ b/csrc/cpu/utils.cpp @@ -0,0 +1,81 @@ +#include +#include +#include +#include + +#include "cpu_types.hpp" + +void init_cpu_threads_env(const std::string& cpu_ids) { + bitmask* omp_cpu_mask = numa_parse_cpustring(cpu_ids.c_str()); + TORCH_CHECK(omp_cpu_mask->size > 0); + std::vector omp_cpu_ids; + omp_cpu_ids.reserve(omp_cpu_mask->size); + + constexpr int group_size = 8 * sizeof(*omp_cpu_mask->maskp); + + for (int 
offset = 0; offset < omp_cpu_mask->size; offset += group_size) { + unsigned long group_mask = omp_cpu_mask->maskp[offset / group_size]; + int i = 0; + while (group_mask) { + if (group_mask & 1) { + omp_cpu_ids.emplace_back(offset + i); + } + ++i; + group_mask >>= 1; + } + } + + // Memory node binding + if (numa_available() != -1) { + int mem_node_id = numa_node_of_cpu(omp_cpu_ids.front()); + + bitmask* mask = numa_parse_nodestring(std::to_string(mem_node_id).c_str()); + bitmask* src_mask = numa_get_membind(); + TORCH_CHECK_LE(numa_max_node(), 64); + + int pid = getpid(); + + // move all existing pages to the specified numa node. + *(src_mask->maskp) = *(src_mask->maskp) ^ *(mask->maskp); + int page_num = numa_migrate_pages(pid, src_mask, mask); + if (page_num == -1) { + TORCH_CHECK(false, + "numa_migrate_pages failed. errno: " + std::to_string(errno)); + } + + // restrict memory allocation node. + numa_set_membind(mask); + numa_set_strict(1); + + bitmask* cpu_mask = numa_allocate_cpumask(); + if (0 != numa_node_to_cpus(mem_node_id, cpu_mask)) { + TORCH_CHECK(false, + "numa_node_to_cpus failed. errno: " + std::to_string(errno)); + } + + // bind all threads to cpu cores of specified node + if (0 != numa_sched_setaffinity(pid, cpu_mask)) { + TORCH_CHECK(false, "numa_sched_setaffinity failed. errno: " + + std::to_string(errno)); + } + + numa_free_nodemask(mask); + numa_free_nodemask(src_mask); + numa_free_nodemask(cpu_mask); + } + + // OMP threads binding + at::set_num_threads((int)omp_cpu_ids.size()); + omp_set_num_threads((int)omp_cpu_ids.size()); +#pragma omp parallel for schedule(static, 1) + for (size_t i = 0; i < omp_cpu_ids.size(); ++i) { + cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size); + size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size); + CPU_ZERO_S(size, mask); + CPU_SET_S(omp_cpu_ids[i], size, mask); + sched_setaffinity(0, sizeof(cpu_set_t), mask); + CPU_FREE(mask); + } + + numa_free_nodemask(omp_cpu_mask); +} \ No newline at end of file diff --git a/vllm/envs.py b/vllm/envs.py index 595992e51db87..7fbbe28cb34c8 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -29,6 +29,7 @@ VLLM_TRACE_FUNCTION: int = 0 VLLM_ATTENTION_BACKEND: Optional[str] = None VLLM_CPU_KVCACHE_SPACE: int = 0 + VLLM_CPU_OMP_THREADS_BIND: str = "" VLLM_OPENVINO_KVCACHE_SPACE: int = 0 VLLM_OPENVINO_CPU_KV_CACHE_PRECISION: Optional[str] = None VLLM_OPENVINO_ENABLE_QUANTIZED_WEIGHTS: bool = False @@ -246,6 +247,11 @@ def get_default_config_root(): "VLLM_CPU_KVCACHE_SPACE": lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0")), + # CPU core ids bound by OpenMP threads, e.g., "0-31", "0,1,2", + # "0-31,33". CPU cores of different ranks are separated by '|'. 
+ "VLLM_CPU_OMP_THREADS_BIND": + lambda: os.getenv("VLLM_CPU_OMP_THREADS_BIND", "all"), + # OpenVINO key-value cache space # default is 4GB "VLLM_OPENVINO_KVCACHE_SPACE": diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 23e429dac7232..45de0b88ea218 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -1,16 +1,21 @@ -from typing import List, Set, Tuple +import os +from functools import partial +from typing import Any, Awaitable, List, Optional, Set, Tuple, Union import torch import vllm.envs as envs from vllm.config import CacheConfig, ModelConfig, SchedulerConfig from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase +from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, + ResultHandler, WorkerMonitor) from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest, SamplerOutput -from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, - make_async) +from vllm.utils import (get_distributed_init_method, get_open_port, + get_vllm_instance_id, make_async) +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -22,46 +27,155 @@ class CPUExecutor(ExecutorBase): def _init_executor(self) -> None: assert self.device_config.device_type == "cpu" assert self.lora_config is None, "cpu backend doesn't support LoRA" + + # + # Environment variables for CPU executor + # + + # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers + os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() + + # Disable torch async compiling which won't work with daemonic processes + os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" + self.model_config = _verify_and_get_model_config(self.model_config) self.cache_config = _verify_and_get_cache_config(self.cache_config) self.scheduler_config = _verify_and_get_scheduler_config( self.scheduler_config) - # Instantiate the worker and load the model to CPU. - self._init_worker() - - def _init_worker(self): - from vllm.worker.cpu_worker import CPUWorker + # Multiprocessing-based executor does not support multi-node setting. + # Since it only works for single node, we can use the loopback address + # 127.0.0.1 for communication. 
+ ip = "127.0.0.1" + port = get_open_port() + self.distributed_init_method = get_distributed_init_method(ip, port) + + is_async = isinstance(self, CPUExecutorAsync) + + world_size = self.parallel_config.tensor_parallel_size + result_handler = ResultHandler() + self.parallel_worker_tasks: Optional[Union[Any, Awaitable[Any]]] = None + self.workers = [] + + if is_async: + self.workers = [ + ProcessWorkerWrapper( + result_handler, + partial( + self._create_worker, + rank=rank, + local_rank=rank, + )) for rank in range(0, world_size) + ] + self.driver_worker = self.workers[0] + self.workers = self.workers[1:] + self.driver_method_invoker = _async_driver_method_invoker + else: + self.driver_worker = self._create_worker() + self.driver_method_invoker = _driver_method_invoker + + if world_size != 1: + self.workers = [ + ProcessWorkerWrapper( + result_handler, + partial( + self._create_worker, + rank=rank, + local_rank=rank, + )) for rank in range(1, world_size) + ] + + if world_size != 1 or is_async: + if is_async: + async_worker_list = self.workers + [self.driver_worker] + else: + async_worker_list = self.workers + self.worker_monitor = WorkerMonitor(async_worker_list, result_handler) + result_handler.start() + self.worker_monitor.start() + + self._run_workers("init_device") + self._run_workers("load_model", + max_concurrent_workers=self.parallel_config. + max_parallel_loading_workers) + + def _create_worker( + self, + local_rank: int = 0, + rank: int = 0, + ): + worker_module_name = "vllm.worker.cpu_worker" + worker_class_name = "CPUWorker" + + wrapper = WorkerWrapperBase( + worker_module_name=worker_module_name, + worker_class_name=worker_class_name, + ) - assert self.parallel_config.world_size == 1, ( - "CPUExecutor only supports single CPU socket currently.") + assert self.distributed_init_method is not None - distributed_init_method = get_distributed_init_method( - get_ip(), get_open_port()) - self.driver_worker = CPUWorker( + kwargs = dict( model_config=self.model_config, parallel_config=self.parallel_config, scheduler_config=self.scheduler_config, device_config=self.device_config, cache_config=self.cache_config, load_config=self.load_config, - local_rank=0, - rank=0, - distributed_init_method=distributed_init_method, + local_rank=local_rank, + rank=rank, + distributed_init_method=self.distributed_init_method, lora_config=self.lora_config, multimodal_config=self.multimodal_config, kv_cache_dtype=self.cache_config.cache_dtype, prompt_adapter_config=self.prompt_adapter_config, - is_driver_worker=True, + is_driver_worker=rank == 0, ) - self.driver_worker.init_device() - self.driver_worker.load_model() + wrapper.init_worker(**kwargs) + + return wrapper.worker + + def _run_workers( + self, + method: str, + *args, + async_run_remote_workers_only: bool = False, + max_concurrent_workers: Optional[int] = None, + **kwargs, + ) -> Any: + """Runs the given method on all workers. + + Args: + async_run_remote_workers_only: If True the method will be run only + in the remote workers, not the driver worker. It will also be + run asynchronously and return a list of futures rather than + blocking on the results. + """ + + if max_concurrent_workers: + raise NotImplementedError( + "max_concurrent_workers is not supported yet.") + + # Start the workers first. 
+ worker_outputs = [ + worker.execute_method(method, *args, **kwargs) + for worker in self.workers + ] + + if async_run_remote_workers_only: + # Just return futures + return worker_outputs + + driver_worker_output = self.driver_method_invoker(self.driver_worker, method, *args, **kwargs) + + # Get the results of the workers. + return [driver_worker_output + ] + [output.get() for output in worker_outputs] def determine_num_available_blocks(self) -> Tuple[int, int]: """Determine the number of available KV blocks by invoking the underlying worker. """ - return self.driver_worker.determine_num_available_blocks() + return self.driver_method_invoker(self.driver_worker, "determine_num_available_blocks") def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None: @@ -74,25 +188,52 @@ def initialize_cache(self, num_gpu_blocks: int, # referred as `gpu block`. Because we want to reuse the existing block # management procedure. logger.info("# CPU blocks: %d", num_gpu_blocks) - self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks) + + self._run_workers("initialize_cache", + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks) def execute_model( self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: - output = self.driver_worker.execute_model(execute_model_req) + if (self.parallel_config.tensor_parallel_size > 1 and + self.parallel_worker_tasks is None): + self.parallel_worker_tasks = self._run_workers( + "start_worker_execution_loop", + async_run_remote_workers_only=True, + ) + output = self.driver_method_invoker(self.driver_worker, "execute_model", execute_model_req) return output + def stop_remote_worker_execution_loop(self) -> None: + if self.parallel_worker_tasks is None: + return + """ + Passing None will cause the driver to stop the model execution + loop running in each of the remote workers. + """ + self.driver_method_invoker(self.driver_worker, "execute_model", None) + parallel_worker_tasks = self.parallel_worker_tasks + self.parallel_worker_tasks = None + # Ensure that workers exit model loop cleanly + # (this will raise otherwise) + self._wait_for_tasks_completion(parallel_worker_tasks) + def add_lora(self, lora_request: LoRARequest) -> bool: - return self.driver_worker.add_lora(lora_request) + return all(self._run_workers("add_lora", lora_request)) def remove_lora(self, lora_id: int) -> bool: - return self.driver_worker.remove_lora(lora_id) + return all(self._run_workers("remove_lora", lora_id)) def pin_lora(self, lora_id: int) -> bool: - return self.driver_worker.pin_lora(lora_id) + assert lora_id > 0, "lora_id must be greater than 0." + return all(self._run_workers( + "pin_lora", + lora_id=lora_id, + )) def list_loras(self) -> Set[int]: - return self.driver_worker.list_loras() + return self.driver_method_invoker(self.driver_worker, "list_loras") def add_prompt_adapter( self, prompt_adapter_request: PromptAdapterRequest) -> bool: @@ -108,24 +249,33 @@ def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool: return self.driver_worker.pin_prompt_adapter(prompt_adapter_id) def check_health(self) -> None: - # CPUExecutor will always be healthy as long as - # it's running. 
- return - + """Raises an error if engine is unhealthy.""" + if self.worker_monitor is not None and not self.worker_monitor.is_alive( + ): + raise RuntimeError("Worker processes are not running") + + def shutdown(self): + if (worker_monitor := getattr(self, "worker_monitor", + None)) is not None: + worker_monitor.close() + + def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None: + """Wait for futures returned from _run_workers() with + async_run_remote_workers_only to complete.""" + for result in parallel_worker_tasks: + result.get() class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase): async def execute_model_async( self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: - output = await make_async(self.driver_worker.execute_model + output = await make_async(self.execute_model )(execute_model_req=execute_model_req, ) return output async def check_health_async(self) -> None: - # CPUExecutor will always be healthy as long as - # it's running. - return + self.check_health() def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: @@ -170,3 +320,9 @@ def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig: f" {kv_cache_space}, expect a positive integer value.") return config + +def _driver_method_invoker(driver, method: str, *args, **kwargs): + return getattr(driver, method)(*args, **kwargs) + +def _async_driver_method_invoker(driver, method: str, *args, **kwargs): + return driver.execute_method(method, *args, **kwargs).get() \ No newline at end of file diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 83f4ba69fb728..71763c08ec45f 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -42,6 +42,7 @@ class CPUModelInput(ModelRunnerInputBase): attn_metadata: Optional["AttentionMetadata"] = None sampling_metadata: Optional["SamplingMetadata"] = None multi_modal_kwargs: Optional[Mapping[str, BatchedTensors]] = None + virtual_engine: Optional[int] = None def as_broadcastable_tensor_dict( self) -> Dict[str, Union[int, torch.Tensor]]: @@ -204,8 +205,8 @@ def _prepare_prompt( attn_metadata = self.attn_backend.make_metadata( is_prompt=True, seq_lens=seq_lens, - seq_lens_tensor=None, - max_decode_seq_len=None, + seq_lens_tensor=torch.tensor([]), + max_decode_seq_len=0, num_prefills=len(seq_lens), num_prefill_tokens=num_prompt_tokens, num_decode_tokens=0, @@ -345,7 +346,7 @@ def prepare_model_input( multi_modal_kwargs=multi_modal_kwargs, ) - @torch.inference_mode() + @torch.no_grad() def execute_model( self, model_input: CPUModelInput, diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 3c22c73267b7f..cc8bfbe429746 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -4,6 +4,7 @@ import torch import torch.distributed +import vllm.envs as envs from vllm.attention import get_attn_backend from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, MultiModalConfig, ParallelConfig, @@ -176,7 +177,14 @@ def __init__( self.cache_engine: List[CPUCacheEngine] self.cpu_cache: List[List[torch.Tensor]] + omp_cpuids = envs.VLLM_CPU_OMP_THREADS_BIND + if omp_cpuids == "all": + self.local_omp_cpuid = "all" + else: + self.local_omp_cpuid = omp_cpuids.split("|")[rank] + def init_device(self) -> None: + torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) self.init_distributed_environment() # Set random seed. 
set_random_seed(self.model_config.seed) From be11ea557026a914d6fc63188ed665f95daaca4d Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 15:57:05 +0000 Subject: [PATCH 02/36] Add CI --- .buildkite/run-cpu-test.sh | 3 +++ Dockerfile.cpu | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index a7678aae54644..b8ff7b29f2f2e 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -26,3 +26,6 @@ docker exec cpu-test bash -c "cd tests; pip install pytest Pillow protobuf cd ../ pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported + +# online inference +docker exec cpu-test bash -c "VLLM_CPU_KVCACHE_SPACE=10 VLLM_CPU_OMP_THREADS_BIND=all python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & server_pid=$! && wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json && timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 && python3 benchmarks/benchmark_serving.py --backend vllm --dataset-name sharegpt --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json --model facebook/opt-125m --num-prompts 20 --endpoint /v1/completions --tokenizer facebook/opt-125m && echo $server_pid && kill $server_pid" diff --git a/Dockerfile.cpu b/Dockerfile.cpu index f0a25628d487d..6493d42d478f4 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -3,7 +3,7 @@ FROM ubuntu:22.04 AS cpu-test-1 RUN apt-get update -y \ - && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \ + && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev curl \ && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 # https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html From 3b56f133000dde81d33f3d4e1e118de0d70a7b9c Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 16:15:36 +0000 Subject: [PATCH 03/36] update --- .buildkite/run-cpu-test.sh | 18 +++++++++++++++++- Dockerfile.cpu | 4 ++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index b8ff7b29f2f2e..a516d190cb9c1 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -2,6 +2,8 @@ # It serves a sanity check for compilation and basic model usage. set -ex +docker image prune -f + # Try building the docker image docker build -t cpu-test -f Dockerfile.cpu . docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . @@ -28,4 +30,18 @@ docker exec cpu-test bash -c "cd tests; pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported # online inference -docker exec cpu-test bash -c "VLLM_CPU_KVCACHE_SPACE=10 VLLM_CPU_OMP_THREADS_BIND=all python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & server_pid=$! 
&& wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json && timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 && python3 benchmarks/benchmark_serving.py --backend vllm --dataset-name sharegpt --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json --model facebook/opt-125m --num-prompts 20 --endpoint /v1/completions --tokenizer facebook/opt-125m && echo $server_pid && kill $server_pid" +docker exec cpu-test bash -c " + export VLLM_CPU_KVCACHE_SPACE=10 + export VLLM_CPU_OMP_THREADS_BIND=96-142 + python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & server_pid=$! + wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 + python3 benchmarks/benchmark_serving.py \ + --backend vllm \ + --dataset-name sharegpt \ + --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ + --model facebook/opt-125m \ + --num-prompts 20 \ + --endpoint /v1/completions \ + --tokenizer facebook/opt-125m + echo $server_pid && kill $server_pid" diff --git a/Dockerfile.cpu b/Dockerfile.cpu index 6493d42d478f4..10729049b8690 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -2,8 +2,8 @@ FROM ubuntu:22.04 AS cpu-test-1 -RUN apt-get update -y \ - && apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev curl \ +RUN apt-get update -y \ + && apt-get install -y curl git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \ && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 # https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html From e1fd2677bf1ffb234855b2b10c319adc7d1ff639 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 16:54:11 +0000 Subject: [PATCH 04/36] update --- .buildkite/run-cpu-test.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index a516d190cb9c1..fd64a615b0161 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -32,8 +32,8 @@ docker exec cpu-test bash -c "cd tests; # online inference docker exec cpu-test bash -c " export VLLM_CPU_KVCACHE_SPACE=10 - export VLLM_CPU_OMP_THREADS_BIND=96-142 - python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & server_pid=$! 
+ export VLLM_CPU_OMP_THREADS_BIND=48-92 + python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 python3 benchmarks/benchmark_serving.py \ @@ -43,5 +43,4 @@ docker exec cpu-test bash -c " --model facebook/opt-125m \ --num-prompts 20 \ --endpoint /v1/completions \ - --tokenizer facebook/opt-125m - echo $server_pid && kill $server_pid" + --tokenizer facebook/opt-125m" From c0ab0199fa05c7627f6f626c96402e33cb3cae68 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 17:26:46 +0000 Subject: [PATCH 05/36] update doc --- .../getting_started/cpu-installation.rst | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 1c97515dbecd9..2a759893bd1d6 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -10,6 +10,7 @@ Table of contents: #. :ref:`Requirements ` #. :ref:`Quick start using Dockerfile ` #. :ref:`Build from source ` +#. :ref:`Related runtime environment variables ` #. :ref:`Intel Extension for PyTorch ` #. :ref:`Performance tips ` @@ -47,7 +48,7 @@ Build from source .. code-block:: console $ sudo apt-get update -y - $ sudo apt-get install -y gcc-12 g++-12 + $ sudo apt-get install -y gcc-12 g++-12 libnuma-dev $ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 - Second, install Python packages for vLLM CPU backend building: @@ -71,6 +72,15 @@ Build from source - If you want to force enable AVX512_BF16 for the cross-compilation, please set environment variable VLLM_CPU_AVX512BF16=1 before the building. +.. _env_intro: + +Related runtime environment variables +------------------------------------- + +- ``VLLM_CPU_KVCACHE_SPACE``: specify the KV Cache size (e.g, ``VLLM_CPU_KVCACHE_SPACE=40`` means 40 GB space for KV cache), larger setting will allow vLLM running more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. + +- ``VLLM_CPU_OMP_THREADS_BIND``: specify the CPU cores dedicated to the OpenMP threads. For example, ``VLLM_CPU_OMP_THREADS_BIND=0-31`` means there will be 32 OpenMP threads bound on 0-31 CPU cores. + .. _ipex_guidance: Intel Extension for PyTorch @@ -85,8 +95,6 @@ Intel Extension for PyTorch Performance tips ----------------- -- vLLM CPU backend uses environment variable ``VLLM_CPU_KVCACHE_SPACE`` to specify the KV Cache size (e.g, ``VLLM_CPU_KVCACHE_SPACE=40`` means 40 GB space for KV cache), larger setting will allow vLLM running more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. - - We highly recommend to use TCMalloc for high performance memory allocation and better cache locality. For example, on Ubuntu 22.4, you can run: .. code-block:: console @@ -96,11 +104,17 @@ Performance tips $ export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:$LD_PRELOAD # prepend the library to LD_PRELOAD $ python examples/offline_inference.py # run vLLM -- vLLM CPU backend uses OpenMP for thread-parallel computation. 
If you want the best performance on CPU, it will be very critical to isolate CPU cores for OpenMP threads with other thread pools (like web-service event-loop), to avoid CPU oversubscription. +- When using the online serving, it is recommended to reserve 1-2 CPU cores for the serving framework to avoid CPU oversubscription. For example, on a platform with 32 physical CPU cores, reserving CPU 30 and 31 for the framework and using CPU 0-29 for OpenMP: + +.. code-block:: console + + $ export VLLM_CPU_KVCACHE_SPACE=40 + $ export VLLM_CPU_OMP_THREADS_BIND=0-29 + $ python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m -- If using vLLM CPU backend on a bare-metal machine, it is recommended to disable the hyper-threading. +- If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU cores using ``VLLM_CPU_OMP_THREADS_BIND``. -- If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores and memory nodes, to avoid the remote memory node access. ``numactl`` is an useful tool for CPU core and memory binding on NUMA platform. Besides, ``--cpuset-cpus`` and ``--cpuset-mems`` arguments of ``docker run`` are also useful. +- If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores using ``VLLM_CPU_OMP_THREADS_BIND`` to avoid cross NUMA node memory access. From 3d653f6ebdaeea717cf03d2b62044ec9fe99a4a0 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 17:27:53 +0000 Subject: [PATCH 06/36] format --- vllm/executor/cpu_executor.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 45de0b88ea218..ef59f748fa325 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -56,7 +56,7 @@ def _init_executor(self) -> None: result_handler = ResultHandler() self.parallel_worker_tasks: Optional[Union[Any, Awaitable[Any]]] = None self.workers = [] - + if is_async: self.workers = [ ProcessWorkerWrapper( @@ -73,7 +73,7 @@ def _init_executor(self) -> None: else: self.driver_worker = self._create_worker() self.driver_method_invoker = _driver_method_invoker - + if world_size != 1: self.workers = [ ProcessWorkerWrapper( @@ -90,7 +90,8 @@ def _init_executor(self) -> None: async_worker_list = self.workers + [self.driver_worker] else: async_worker_list = self.workers - self.worker_monitor = WorkerMonitor(async_worker_list, result_handler) + self.worker_monitor = WorkerMonitor(async_worker_list, + result_handler) result_handler.start() self.worker_monitor.start() @@ -165,7 +166,8 @@ def _run_workers( # Just return futures return worker_outputs - driver_worker_output = self.driver_method_invoker(self.driver_worker, method, *args, **kwargs) + driver_worker_output = self.driver_method_invoker( + self.driver_worker, method, *args, **kwargs) # Get the results of the workers. return [driver_worker_output @@ -175,7 +177,8 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: """Determine the number of available KV blocks by invoking the underlying worker. 
""" - return self.driver_method_invoker(self.driver_worker, "determine_num_available_blocks") + return self.driver_method_invoker(self.driver_worker, + "determine_num_available_blocks") def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None: @@ -196,13 +199,14 @@ def initialize_cache(self, num_gpu_blocks: int, def execute_model( self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]: - if (self.parallel_config.tensor_parallel_size > 1 and - self.parallel_worker_tasks is None): + if (self.parallel_config.tensor_parallel_size > 1 + and self.parallel_worker_tasks is None): self.parallel_worker_tasks = self._run_workers( "start_worker_execution_loop", async_run_remote_workers_only=True, ) - output = self.driver_method_invoker(self.driver_worker, "execute_model", execute_model_req) + output = self.driver_method_invoker(self.driver_worker, + "execute_model", execute_model_req) return output def stop_remote_worker_execution_loop(self) -> None: @@ -265,6 +269,7 @@ def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None: for result in parallel_worker_tasks: result.get() + class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase): async def execute_model_async( @@ -321,8 +326,10 @@ def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig: return config + def _driver_method_invoker(driver, method: str, *args, **kwargs): return getattr(driver, method)(*args, **kwargs) + def _async_driver_method_invoker(driver, method: str, *args, **kwargs): - return driver.execute_method(method, *args, **kwargs).get() \ No newline at end of file + return driver.execute_method(method, *args, **kwargs).get() From 84d80291fbfd9d82ed0381c31260aa791ed3cdc2 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 3 Jul 2024 17:29:12 +0000 Subject: [PATCH 07/36] update --- docs/source/getting_started/cpu-installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 2a759893bd1d6..0f3155638bbe3 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -112,7 +112,7 @@ Performance tips $ export VLLM_CPU_OMP_THREADS_BIND=0-29 $ python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m -- If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU cores using ``VLLM_CPU_OMP_THREADS_BIND``. +- If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU core using ``VLLM_CPU_OMP_THREADS_BIND``. - If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores using ``VLLM_CPU_OMP_THREADS_BIND`` to avoid cross NUMA node memory access. 
From c7fee29edf8ca0f0a7332df9d76adb5bc4d83c03 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 02:43:02 +0000 Subject: [PATCH 08/36] Fix --- .buildkite/run-cpu-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index fd64a615b0161..0395f219b8a5e 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -15,9 +15,9 @@ remove_docker_container # Run the image docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ - --cpuset-mems=1 --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test cpu-test + --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test cpu-test docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ - --cpuset-mems=1 --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test-avx2 cpu-test-avx2 + --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test-avx2 cpu-test-avx2 # offline inference docker exec cpu-test bash -c "python3 examples/offline_inference.py" From 0d85c416be5d5a6854715f52d8f005877897de63 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 03:19:51 +0000 Subject: [PATCH 09/36] trigger From 991e8b3907070a2c554050d6946fbb6899da16ef Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 04:11:56 +0000 Subject: [PATCH 10/36] trigger From 1845d955a1f426e78acec4f076eabc4dffc86085 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 05:28:35 +0000 Subject: [PATCH 11/36] trigger From 766705e34e52691fc90010087487c660ad2a530a Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 07:13:24 +0000 Subject: [PATCH 12/36] fix --- .buildkite/run-cpu-test.sh | 4 +--- vllm/executor/cpu_executor.py | 4 +--- vllm/worker/cpu_worker.py | 3 ++- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 0395f219b8a5e..4313d28ddf66d 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -20,13 +20,11 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test-avx2 cpu-test-avx2 # offline inference -docker exec cpu-test bash -c "python3 examples/offline_inference.py" docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" # Run basic model test -docker exec cpu-test bash -c "cd tests; +docker exec cpu-test bash -c " pip install pytest Pillow protobuf - cd ../ pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported # online inference diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index ef59f748fa325..09699b9035059 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -96,9 +96,7 @@ def _init_executor(self) -> None: self.worker_monitor.start() self._run_workers("init_device") - self._run_workers("load_model", - max_concurrent_workers=self.parallel_config. 
- max_parallel_loading_workers) + self._run_workers("load_model") def _create_worker( self, diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index cc8bfbe429746..6cb5ce537e120 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -184,7 +184,8 @@ def __init__( self.local_omp_cpuid = omp_cpuids.split("|")[rank] def init_device(self) -> None: - torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) + if self.local_omp_cpuid != "all": + torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) self.init_distributed_environment() # Set random seed. set_random_seed(self.model_config.seed) From fae953458da9604f9f2aeea439a2643b8bfa4220 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 4 Jul 2024 07:32:53 +0000 Subject: [PATCH 13/36] Fix --- .buildkite/run-cpu-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 4313d28ddf66d..e4d6887afd3cf 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -5,8 +5,8 @@ set -ex docker image prune -f # Try building the docker image -docker build -t cpu-test -f Dockerfile.cpu . -docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . +numactl -C 48-95 -N 1 docker build -t cpu-test -f Dockerfile.cpu . +numactl -C 48-95 -N 1 docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . # Setup cleanup remove_docker_container() { docker rm -f cpu-test cpu-test-avx2 || true; } From 6cfbedf4bf47d63bb20e8a33e1ec95b1edfdbaad Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 25 Jul 2024 12:22:01 +0000 Subject: [PATCH 14/36] fix conflict --- .buildkite/run-cpu-test.sh | 2 +- vllm/executor/cpu_executor.py | 15 +++++++++++++++ vllm/utils.py | 21 --------------------- vllm/worker/cpu_worker.py | 5 +---- 4 files changed, 17 insertions(+), 26 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index e4d6887afd3cf..f5725a271c2f4 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -32,7 +32,7 @@ docker exec cpu-test bash -c " export VLLM_CPU_KVCACHE_SPACE=10 export VLLM_CPU_OMP_THREADS_BIND=48-92 python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & - wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 python3 benchmarks/benchmark_serving.py \ --backend vllm \ diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 09699b9035059..8842c3656002c 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -38,6 +38,21 @@ def _init_executor(self) -> None: # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" + # Intel OpenMP setting + ld_prealod_str = os.getenv("LD_PRELOAD", "") + if "libiomp5.so" in ld_prealod_str: + # The time(milliseconds) that a thread should wait after + # completing the execution of a parallel region, before sleeping. 
+ os.environ['KMP_BLOCKTIME'] = "1" + # dump settings on start up + os.environ['KMP_SETTINGS'] = "1" + # Prevents the CPU to run into low performance state + os.environ['KMP_TPAUSE'] = "0" + # Provides fine granularity parallelism + os.environ['KMP_FORKJOIN_BARRIER_PATTERN'] = "dist,dist" + os.environ['KMP_PLAIN_BARRIER_PATTERN'] = "dist,dist" + os.environ['KMP_REDUCTION_BARRIER_PATTERN'] = "dist,dist" + self.model_config = _verify_and_get_model_config(self.model_config) self.cache_config = _verify_and_get_cache_config(self.cache_config) self.scheduler_config = _verify_and_get_scheduler_config( diff --git a/vllm/utils.py b/vllm/utils.py index 876c3bf90b02c..90be09fc7b967 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -404,27 +404,6 @@ def update_environment_variables(envs: Dict[str, str]): os.environ[k] = v -def init_kmp_env(): - if not is_cpu(): - return - - ld_prealod_str = os.getenv("LD_PRELOAD", "") - if "libiomp5.so" not in ld_prealod_str: - return - - # The time(milliseconds) that a thread should wait after completing the - # execution of a parallel region, before sleeping. - os.environ['KMP_BLOCKTIME'] = "1" - # dump settings on start up - os.environ['KMP_SETTINGS'] = "1" - # Prevents the CPU to run into low performance state - os.environ['KMP_TPAUSE'] = "0" - # Provides fine granularity parallelism - os.environ['KMP_FORKJOIN_BARRIER_PATTERN'] = "dist,dist" - os.environ['KMP_PLAIN_BARRIER_PATTERN'] = "dist,dist" - os.environ['KMP_REDUCTION_BARRIER_PATTERN'] = "dist,dist" - - def chunk_list(lst: List[T], chunk_size: int): """Yield successive chunk_size chunks from lst.""" for i in range(0, len(lst), chunk_size): diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 6cb5ce537e120..809b9daa7a023 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -14,7 +14,7 @@ from vllm.logger import init_logger from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest -from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, init_kmp_env +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE from vllm.worker.cpu_model_runner import CPUModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, LoraNotSupportedWorkerBase, WorkerInput) @@ -153,9 +153,6 @@ def __init__( if self.is_driver_worker: assert self.rank == 0, "The driver worker must have rank 0." - # try to initialize intel openmp optimized tunings - init_kmp_env() - if self.model_config.trust_remote_code: # note: lazy import to avoid importing torch before initializing from vllm.utils import init_cached_hf_modules From 51d0eda2c7cf1ab9c569712b77152618b44fc394 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 5 Jul 2024 02:10:41 +0000 Subject: [PATCH 15/36] disable KMP setting dumping --- vllm/executor/cpu_executor.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 8842c3656002c..7f4ae8054d5b2 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -44,8 +44,6 @@ def _init_executor(self) -> None: # The time(milliseconds) that a thread should wait after # completing the execution of a parallel region, before sleeping. 
os.environ['KMP_BLOCKTIME'] = "1" - # dump settings on start up - os.environ['KMP_SETTINGS'] = "1" # Prevents the CPU to run into low performance state os.environ['KMP_TPAUSE'] = "0" # Provides fine granularity parallelism From ddcae0d2e3df03f6439767ec35e9ab4e95a2e1b7 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 5 Jul 2024 04:05:19 +0000 Subject: [PATCH 16/36] trigger From 331b4f5d6125eeb312f585c724c95c5b209638f6 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 5 Jul 2024 05:54:22 +0000 Subject: [PATCH 17/36] trigger From 8af42c04af8ad3f950f3c3c9b321ece7a4d6f9c7 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 5 Jul 2024 09:12:01 +0000 Subject: [PATCH 18/36] trigger From f61b9a8cc81c660445f00c6ec70c1a4f8c441f38 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Tue, 9 Jul 2024 02:12:51 +0000 Subject: [PATCH 19/36] trigger From c3d028e18330de19414baf75128d9fa03c9a76cd Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 11 Jul 2024 08:14:37 +0000 Subject: [PATCH 20/36] Update --- csrc/cpu/utils.cpp | 42 +++++++++++------------------------ vllm/executor/cpu_executor.py | 2 +- vllm/worker/cpu_worker.py | 15 ++++++++----- 3 files changed, 24 insertions(+), 35 deletions(-) diff --git a/csrc/cpu/utils.cpp b/csrc/cpu/utils.cpp index a9d204a15565d..e8471230022d5 100644 --- a/csrc/cpu/utils.cpp +++ b/csrc/cpu/utils.cpp @@ -28,10 +28,8 @@ void init_cpu_threads_env(const std::string& cpu_ids) { // Memory node binding if (numa_available() != -1) { int mem_node_id = numa_node_of_cpu(omp_cpu_ids.front()); - bitmask* mask = numa_parse_nodestring(std::to_string(mem_node_id).c_str()); bitmask* src_mask = numa_get_membind(); - TORCH_CHECK_LE(numa_max_node(), 64); int pid = getpid(); @@ -46,36 +44,22 @@ void init_cpu_threads_env(const std::string& cpu_ids) { // restrict memory allocation node. numa_set_membind(mask); numa_set_strict(1); - - bitmask* cpu_mask = numa_allocate_cpumask(); - if (0 != numa_node_to_cpus(mem_node_id, cpu_mask)) { - TORCH_CHECK(false, - "numa_node_to_cpus failed. errno: " + std::to_string(errno)); - } - - // bind all threads to cpu cores of specified node - if (0 != numa_sched_setaffinity(pid, cpu_mask)) { - TORCH_CHECK(false, "numa_sched_setaffinity failed. 
errno: " + - std::to_string(errno)); - } - - numa_free_nodemask(mask); - numa_free_nodemask(src_mask); - numa_free_nodemask(cpu_mask); } // OMP threads binding - at::set_num_threads((int)omp_cpu_ids.size()); - omp_set_num_threads((int)omp_cpu_ids.size()); -#pragma omp parallel for schedule(static, 1) - for (size_t i = 0; i < omp_cpu_ids.size(); ++i) { - cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size); - size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size); - CPU_ZERO_S(size, mask); - CPU_SET_S(omp_cpu_ids[i], size, mask); - sched_setaffinity(0, sizeof(cpu_set_t), mask); - CPU_FREE(mask); - } + omp_set_num_threads((int)omp_cpu_ids.size()); + torch::set_num_threads((int)omp_cpu_ids.size()); + TORCH_CHECK_EQ(omp_cpu_ids.size(), torch::get_num_threads()); + TORCH_CHECK_EQ(omp_cpu_ids.size(), omp_get_max_threads()); + #pragma omp parallel for schedule(static, 1) + for (size_t i = 0; i < omp_cpu_ids.size(); ++i) { + cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size); + size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size); + CPU_ZERO_S(size, mask); + CPU_SET_S(omp_cpu_ids[i], size, mask); + sched_setaffinity(0, sizeof(cpu_set_t), mask); + CPU_FREE(mask); + } numa_free_nodemask(omp_cpu_mask); } \ No newline at end of file diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 7f4ae8054d5b2..7251437461583 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -37,7 +37,7 @@ def _init_executor(self) -> None: # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" - + # Intel OpenMP setting ld_prealod_str = os.getenv("LD_PRELOAD", "") if "libiomp5.so" in ld_prealod_str: diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 809b9daa7a023..3d0e1daa52113 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -1,5 +1,6 @@ """A CPU worker class.""" from typing import Dict, List, Optional, Tuple +import os import torch import torch.distributed @@ -157,6 +158,14 @@ def __init__( # note: lazy import to avoid importing torch before initializing from vllm.utils import init_cached_hf_modules init_cached_hf_modules() + + # Setup OpenMP threads affinity. + omp_cpuids = envs.VLLM_CPU_OMP_THREADS_BIND + if omp_cpuids == "all": + self.local_omp_cpuid = "all" + else: + self.local_omp_cpuid = omp_cpuids.split("|")[rank] + self.model_runner: CPUModelRunner = CPUModelRunner( model_config, parallel_config, @@ -174,15 +183,11 @@ def __init__( self.cache_engine: List[CPUCacheEngine] self.cpu_cache: List[List[torch.Tensor]] - omp_cpuids = envs.VLLM_CPU_OMP_THREADS_BIND - if omp_cpuids == "all": - self.local_omp_cpuid = "all" - else: - self.local_omp_cpuid = omp_cpuids.split("|")[rank] def init_device(self) -> None: if self.local_omp_cpuid != "all": torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) + self.init_distributed_environment() # Set random seed. 
set_random_seed(self.model_config.seed) From 6be36d672d6eb03410fe66f359dea5effae5c484 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 12 Jul 2024 06:04:55 +0000 Subject: [PATCH 21/36] Add shm op --- Makefile | 77 +++++ cmake/cpu_extension.cmake | 1 + csrc/cpu/cpu_types_x86.hpp | 11 + csrc/cpu/shm_ccl.cpp | 516 +++++++++++++++++++++++++++++ csrc/cpu/torch_bindings.cpp | 26 ++ vllm/distributed/parallel_state.py | 3 +- vllm/executor/cpu_executor.py | 6 + vllm/worker/cpu_worker.py | 34 +- 8 files changed, 672 insertions(+), 2 deletions(-) create mode 100644 Makefile create mode 100644 csrc/cpu/shm_ccl.cpp diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000..e56042a426d8c --- /dev/null +++ b/Makefile @@ -0,0 +1,77 @@ +.PHONY: clean build + +install_dnnl: + git clone -b rls-v3.5 https://github.com/oneapi-src/oneDNN.git + cd oneDNN && mkdir build && \ + cmake -B build -G Ninja -DONEDNN_LIBRARY_TYPE=STATIC -DONEDNN_BUILD_DOC=OFF -DONEDNN_BUILD_EXAMPLES=OFF -DONEDNN_BUILD_TESTS=OFF -DONEDNN_BUILD_GRAPH=OFF -DONEDNN_ENABLE_WORKLOAD=INFERENCE -DONEDNN_ENABLE_PRIMITIVE=MATMUL && \ + cmake --build build --target install --config Release + +install_deps: + pip install wheel packaging ninja setuptools>=49.4.0 numpy + pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu + +install: + VLLM_TARGET_DEVICE=cpu pip install --no-build-isolation -v -e . + +VLLM_TP_2S_bench: + cd benchmarks && VLLM_CPU_OMP_THREADS_BIND="0-23|24-47" VLLM_CPU_KVCACHE_SPACE=40 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so python3 benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=100 --dtype=bfloat16 --trust-remote-code --device=cpu -tp=2 + +VLLM_2S_offline: + ray stop + OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=32-63 --membind=1 ray start --head --num-cpus=32 --num-gpus=0 + cd examples && OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=0-31 --membind=0 python3 offline_inference.py + +VLLM_TP_4S_bench: + cd benchmarks && VLLM_CPU_OMP_THREADS_BIND="0-31|32-63|64-95|96-127" VLLM_CPU_KVCACHE_SPACE=40 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so python3 benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=1000 --dtype=bfloat16 --trust-remote-code --device=cpu -tp=4 + +VLLM_4S_offline: + ray stop + OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=32-63 --membind=1 ray start --head --num-cpus=32 --num-gpus=0 + OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=64-95 --membind=2 ray start --address=auto --num-cpus=32 --num-gpus=0 + OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=96-127 --membind=3 ray start --address=auto --num-cpus=32 --num-gpus=0 + cd examples && OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=0-31 --membind=0 python3 offline_inference.py + +HF_TP_bench: + cd benchmarks && python benchmark_throughput.py --backend=hf --dataset=../ShareGPT_V3_unfiltered_cleaned_split.json --model=/root/frameworks.bigdata.dev-ops/vicuna-7b-v1.5/ --n=1 --num-prompts=1 --hf-max-batch-size=1 --trust-remote-code --device=cpu + +VLLM_TP_bench: + cd 
benchmarks && \ + VLLM_CPU_OMP_THREADS_BIND="0-47" \ + VLLM_CPU_KVCACHE_SPACE=100 \ + TORCH_LOGS="recompiles" \ + LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ + python3 benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=1000 --dtype=bfloat16 --trust-remote-code --device=cpu + +VLLM_TP_bench_slm: + cd benchmarks && \ + VLLM_CPU_OMP_THREADS_BIND="0-47" \ + VLLM_CPU_KVCACHE_SPACE=100 \ + TORCH_LOGS="recompiles" \ + LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ + python3 benchmark_throughput.py --backend=vllm --model=facebook/opt-125m --n=1 --num-prompts=1000 --input-len=128 --output-len=128 --dtype=bfloat16 --trust-remote-code --device=cpu + +VLLM_LT_bench: + cd benchmarks && \ + VLLM_CPU_OMP_THREADS_BIND="0-47" \ + VLLM_CPU_KVCACHE_SPACE=100 \ + TORCH_LOGS="recompiles" \ + LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ + python3 benchmark_latency.py --model=facebook/opt-125m --n=1 --batch-size=32 --input-len=1024 --output-len=1024 --num-iters-warmup=1 --num-iters=3 --dtype=bfloat16 --trust-remote-code --device=cpu + +VLLM_SERVE_bench: + cd benchmarks && python -m vllm.entrypoints.api_server \ + --model /root/HF_models/vicuna-7b-v1.5/ --swap-space 40 \ + --disable-log-requests --dtype=bfloat16 --device cpu & \ + cd benchmarks && sleep 30 && python benchmark_serving.py \ + --backend vllm \ + --tokenizer /root/HF_models/vicuna-7b-v1.5/ --dataset /root/HF_models/ShareGPT_V3_unfiltered_cleaned_split.json \ + --request-rate 10 + +VLLM_Serve: + cd benchmarks && VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-47" LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 python3 -m vllm.entrypoints.openai.api_server --model lmsys/vicuna-7b-v1.5 --dtype=bfloat16 --device cpu + +VLLM_2S_Serve: + cd benchmarks && VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-23|24-47" LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 python3 -m vllm.entrypoints.openai.api_server --model lmsys/vicuna-7b-v1.5 --dtype=bfloat16 --device cpu -tp=2 + +VLLM_bench_client: + cd benchmarks && python3 benchmark_serving.py --backend vllm --model lmsys/vicuna-7b-v1.5 --tokenizer lmsys/vicuna-7b-v1.5 --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json --request-rate 4 --num-prompts 1000 \ No newline at end of file diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 118f9b28e0ae3..b19c1ce21e20a 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -98,6 +98,7 @@ set(VLLM_EXT_SRC "csrc/cpu/attention.cpp" "csrc/cpu/cache.cpp" "csrc/cpu/utils.cpp" + "csrc/cpu/shm_ccl.cpp" "csrc/cpu/layernorm.cpp" "csrc/cpu/pos_encoding.cpp" "csrc/cpu/torch_bindings.cpp") diff --git a/csrc/cpu/cpu_types_x86.hpp b/csrc/cpu/cpu_types_x86.hpp index f50620a5287d4..192df7f2e63d4 100644 --- a/csrc/cpu/cpu_types_x86.hpp +++ b/csrc/cpu/cpu_types_x86.hpp @@ -510,6 +510,17 @@ inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) { inline void prefetch(const void *addr) { _mm_prefetch(addr, _MM_HINT_T1); } +inline void non_temporal_save(BF16Vec32 &vec, void *ptr) { + _mm512_stream_si512((__m512i *)ptr, vec.reg); +} + +inline void non_temporal_save(BF16Vec16 &vec, void *ptr) { + _mm256_stream_si256((__m256i *)ptr, vec.reg); +} + +inline void non_temporal_save(FP32Vec16 &vec, void *ptr) { + _mm512_stream_ps((float *)ptr, vec.reg); +} }; // namespace vec_op #endif diff --git a/csrc/cpu/shm_ccl.cpp b/csrc/cpu/shm_ccl.cpp new file mode 100644 index 
0000000000000..b34481b13ba83 --- /dev/null +++ b/csrc/cpu/shm_ccl.cpp @@ -0,0 +1,516 @@ +#include "cpu/cpu_types.hpp" + +#include +#include +#include +#include + +namespace { +#define MAX_SHM_RANK_NUM 8 + +template +struct KernelVecType { + using scalar_vec_t = void; +}; + +template <> +struct KernelVecType { + using scalar_vec_t = vec_op::FP32Vec16; +}; + +template <> +struct KernelVecType { + using scalar_vec_t = vec_op::BF16Vec16; +}; + +enum class RankStat : char { READY = 0, EXECUTE, DONE }; + +struct SHMContext { + volatile RankStat rank_stat; + char _padding1[60]; + int rank; + int group_size; + size_t rank_buffer_size; + SHMContext* shm_contexts[MAX_SHM_RANK_NUM]; + char _padding2[48]; + + SHMContext(const int rank, const int group_size, + const size_t rank_buffer_size) + : rank(rank), group_size(group_size), rank_buffer_size(rank_buffer_size) { + static_assert(sizeof(SHMContext) % 64 == 0); + TORCH_CHECK(group_size <= MAX_SHM_RANK_NUM); + TORCH_CHECK(rank < MAX_SHM_RANK_NUM); + TORCH_CHECK((size_t)this % 64 == 0); + for (int i = 0; i < MAX_SHM_RANK_NUM; ++i) { + shm_contexts[i] = nullptr; + } + set_context(rank, this); + rank_stat = RankStat::DONE; + } + + void set_context(int rank, SHMContext* ptr) { + TORCH_CHECK(rank < MAX_SHM_RANK_NUM); + TORCH_CHECK(ptr); + shm_contexts[rank] = ptr; + } + + template + T* rank_ptr(int rank) { + return reinterpret_cast(shm_contexts[rank] + 1); + } + + RankStat get_rank_stat(int rank) const { + return shm_contexts[rank]->rank_stat; + } + + bool is_all_done() { + for (int i = 0; i < group_size; ++i) { + if (shm_contexts[i]->rank_stat != RankStat::DONE) { + return false; + } + } + return true; + } + + bool is_last() const { return rank == (group_size - 1); } + + void set_rank_stat(int rank, RankStat stat) { + shm_contexts[rank]->rank_stat = stat; + } + + void barrier(const RankStat next_stat) { + if (next_stat == RankStat::READY) { + if (is_last()) { + for (int i = 0; i < group_size; ++i) { + set_rank_stat(i, RankStat::READY); + } + } else { + while (get_rank_stat(rank) != RankStat::READY) _mm_pause(); + } + set_rank_stat(rank, RankStat::EXECUTE); + } else if (next_stat == RankStat::DONE) { + set_rank_stat(rank, RankStat::DONE); + if (is_last()) { + while (!is_all_done()) _mm_pause(); + } + } else { + TORCH_CHECK(false, "Invalid next_stat to barrier."); + } + } + + std::string to_string() const { + std::stringstream ss; + ss << "SHMContext: \nrank_stat: "; + switch (rank_stat) { + case RankStat::READY: + ss << "READY, "; + break; + case RankStat::EXECUTE: + ss << "EXECUTE, "; + break; + case RankStat::DONE: + ss << "DONE, "; + break; + default: + TORCH_CHECK(false, "Invalid RankStat type."); + } + ss << "\nrank: " << rank; + ss << "\ngroup_size: " << group_size; + ss << "\nrank_buffer_size: " << rank_buffer_size; + ss << "\nshm_contexts: ["; + + for (int i = 0; i < group_size; ++i) { + ss << shm_contexts[i]->rank << ", "; + } + ss << "]"; + + return ss.str(); + } +}; + +namespace shm_cc_ops { + +void memcpy_64bytes(void* dst, void* src, size_t len) { + constexpr size_t align_len = 64; + TORCH_CHECK(len % align_len == 0); + TORCH_CHECK((size_t)dst % align_len == 0); + TORCH_CHECK((size_t)src % align_len == 0); +#pragma GCC unroll 4 + for (size_t i = 0; i < len; i += align_len) { + vec_op::BF16Vec32 data((char*)src + i); + vec_op::non_temporal_save(data, (char*)dst + i); + } +} + +void parallel_memcpy(void* dst, void* src, size_t len) { + int thread_num = omp_get_max_threads(); + const size_t partition_num = + (len + 512 * thread_num - 1) / (512 * 
thread_num); + +#pragma omp parallel for schedule(static, 1) + for (int i = 0; i < thread_num; ++i) { + size_t offset = i * partition_num * 512; + if (offset < len) { + size_t partition_len = std::min(512 * partition_num, len - offset); + memcpy_64bytes((char*)dst + offset, (char*)src + offset, partition_len); + } + } +} + +void gather(SHMContext* ctx, int rank, void* data, size_t len) { + CPU_KERNEL_GUARD_IN(gather) + TORCH_CHECK(len <= ctx->rank_buffer_size); + ctx->barrier(RankStat::READY); + parallel_memcpy(ctx->rank_ptr(rank), data, len); + ctx->barrier(RankStat::DONE); +} + +void broadcast(SHMContext* ctx, int rank, void* data, size_t len) { + CPU_KERNEL_GUARD_IN(broatcast) + ctx->barrier(RankStat::READY); + parallel_memcpy(data, ctx->rank_ptr(0), len); + ctx->barrier(RankStat::DONE); +} + +void scatter(SHMContext* ctx, int rank, void* data, size_t len) { + CPU_KERNEL_GUARD_IN(scatter) + ctx->barrier(RankStat::READY); + parallel_memcpy(data, ctx->rank_ptr(rank), len); + ctx->barrier(RankStat::DONE); +} + +template +void all_reduce_sum_v1(SHMContext* ctx, int rank, scalar_t* data, + size_t elem_num) { + CPU_KERNEL_GUARD_IN(all_reduce_sum_v1) + const size_t bytes = elem_num * sizeof(scalar_t); + TORCH_CHECK(bytes <= ctx->rank_buffer_size); + shm_cc_ops::gather(ctx, rank, data, bytes); + using scalar_vec_t = typename KernelVecType::scalar_vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + constexpr int CACHELINE_SIZE = 64; + constexpr int PACKED_FACTOR = + CACHELINE_SIZE / (sizeof(scalar_t) * VEC_ELEM_NUM); + TORCH_CHECK(elem_num % VEC_ELEM_NUM == 0); + + ctx->barrier(RankStat::READY); + + int thread_num = omp_get_max_threads(); + size_t partition_num = + (elem_num + thread_num * VEC_ELEM_NUM * PACKED_FACTOR - 1) / + (thread_num * VEC_ELEM_NUM * PACKED_FACTOR); +#pragma omp parallel for schedule(static, 1) + for (int i = 0; i < thread_num; ++i) { + size_t offset = i * partition_num * VEC_ELEM_NUM * PACKED_FACTOR; + if (offset < elem_num) { + const size_t partition_len = std::min( + VEC_ELEM_NUM * PACKED_FACTOR * partition_num, elem_num - offset); + scalar_t* rank_ptrs[RANKS]; + vec_op::unroll_loop([&](int idx) { + rank_ptrs[idx] = ctx->rank_ptr(idx) + offset; + TORCH_CHECK((size_t)rank_ptrs[idx] % 64 == 0); + }); + +#pragma GCC unroll 4 + for (int i = 0; i < partition_len; i += VEC_ELEM_NUM) { + size_t curr_offset = i; + scalar_vec_t data_0(rank_ptrs[0] + curr_offset); + vec_op::FP32Vec16 fp32_data_0(data_0); + vec_op::unroll_loop([&](int k) { + scalar_vec_t data_x(rank_ptrs[k + 1] + curr_offset); + vec_op::FP32Vec16 fp32_data_x(data_x); + fp32_data_0 = fp32_data_0 + fp32_data_x; + }); + data_0 = scalar_vec_t(fp32_data_0); + data_0.save(data + offset + curr_offset); + } + } + } + ctx->barrier(RankStat::DONE); +} + +template +void all_reduce_sum_v2(SHMContext* ctx, int rank, scalar_t* data, + size_t elem_num) { + CPU_KERNEL_GUARD_IN(all_reduce_sum_v2) + const size_t bytes = elem_num * sizeof(scalar_t); + TORCH_CHECK(bytes <= ctx->rank_buffer_size); + shm_cc_ops::gather(ctx, rank, data, bytes); + using scalar_vec_t = typename KernelVecType::scalar_vec_t; + constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); + constexpr int CACHELINE_SIZE = 64; + constexpr int PACKED_FACTOR = + CACHELINE_SIZE / (sizeof(scalar_t) * VEC_ELEM_NUM); + TORCH_CHECK(elem_num % VEC_ELEM_NUM == 0); + + ctx->barrier(RankStat::READY); + + const int world_size = ctx->group_size; + const size_t rank_partition_num = + (elem_num + world_size * VEC_ELEM_NUM * PACKED_FACTOR - 1) / + (world_size * 
VEC_ELEM_NUM * PACKED_FACTOR); + const size_t rank_offset = + rank * rank_partition_num * VEC_ELEM_NUM * PACKED_FACTOR; + + if (rank_offset >= elem_num) { + ctx->barrier(RankStat::DONE); + return; + } + + const size_t rank_elem_num = + std::min(VEC_ELEM_NUM * PACKED_FACTOR * rank_partition_num, + elem_num - rank_offset); + + int thread_num = omp_get_max_threads(); + size_t partition_num = + (rank_elem_num + thread_num * VEC_ELEM_NUM * PACKED_FACTOR - 1) / + (thread_num * VEC_ELEM_NUM * PACKED_FACTOR); + +#pragma omp parallel for schedule(static, 1) + for (int i = 0; i < thread_num; ++i) { + size_t offset = i * partition_num * VEC_ELEM_NUM * PACKED_FACTOR; + if (offset < rank_elem_num) { + const size_t partition_len = std::min( + VEC_ELEM_NUM * PACKED_FACTOR * partition_num, rank_elem_num - offset); + scalar_t* rank_ptrs[RANKS]; + vec_op::unroll_loop([&](int idx) { + rank_ptrs[idx] = ctx->rank_ptr(idx) + rank_offset + offset; + TORCH_CHECK((size_t)rank_ptrs[idx] % 64 == 0); + }); + +#pragma GCC unroll 4 + for (int i = 0; i < partition_len; i += VEC_ELEM_NUM) { + size_t curr_offset = i; + scalar_vec_t data_0(rank_ptrs[0] + curr_offset); + vec_op::FP32Vec16 fp32_data_0(data_0); + vec_op::unroll_loop([&](int k) { + scalar_vec_t data_x(rank_ptrs[k + 1] + curr_offset); + vec_op::FP32Vec16 fp32_data_x(data_x); + fp32_data_0 = fp32_data_0 + fp32_data_x; + }); + data_0 = scalar_vec_t(fp32_data_0); + vec_op::unroll_loop([&](int k) { + vec_op::non_temporal_save(data_0, rank_ptrs[k] + curr_offset); + }); + } + } + } + ctx->barrier(RankStat::DONE); + + shm_cc_ops::scatter(ctx, rank, data, bytes); +} +}; // namespace shm_cc_ops + +class SHMManager { + public: + explicit SHMManager(const std::string& ip_port, const int group_size, + const int rank, const size_t rank_buffer_size) + : _rank(rank), + _shm_names({""}), + _shared_mem_ptrs({nullptr}), + _shm_ctx(nullptr) { + _shm_names[rank] = get_shm_name(ip_port, rank); + _shared_mem_ptrs[rank] = init_shm(rank, rank_buffer_size); + + _shm_ctx = new (_shared_mem_ptrs[rank]) + SHMContext(rank, group_size, round_size(rank_buffer_size)); + } + + void join(const std::string& ip_port, const int group_size, const int rank, + const size_t rank_buffer_size) { + TORCH_CHECK(rank == _rank); + SHMContext* ctx = get_shm_ctx(); + for (int i = 0; i < group_size; ++i) { + if (i != rank) { + TORCH_CHECK(_shm_names[i].empty()); + TORCH_CHECK(_shared_mem_ptrs[i] == nullptr); + + _shm_names[i] = get_shm_name(ip_port, i); + _shared_mem_ptrs[i] = init_shm(i, rank_buffer_size); + ctx->set_context(i, (SHMContext*)_shared_mem_ptrs[i]); + } + } + } + + ~SHMManager() { destroy_shm(); } + + SHMContext* get_shm_ctx() const { + return reinterpret_cast(_shared_mem_ptrs[_rank]); + } + + static std::string get_shm_name(const std::string& ip_port, int rank) { + return "/vllm_" + ip_port + "_" + std::to_string(rank); + } + + private: + static size_t round_size(const size_t size) { + return ((size + 63) >> 6) << 6; + } + + void* init_shm(int target_rank, const size_t rank_buffer_size) { + const std::string& shm_name = _shm_names[target_rank]; + const int local_rank = _rank; + const size_t rounded_rank_buffer_size = round_size(rank_buffer_size); + const size_t shm_size = sizeof(SHMContext) + rounded_rank_buffer_size; + + int fd = -1; + if (local_rank == target_rank) { + fd = shm_open(shm_name.c_str(), O_CREAT | O_EXCL | O_RDWR, + S_IRUSR | S_IWUSR); + + if (fd == -1) + TORCH_CHECK(false, "create shm in SHMManager failed. 
errno: " + + std::to_string(errno)); + + if (ftruncate(fd, shm_size) == -1) + TORCH_CHECK(false, "ftruncate in SHMManager failed. errno: " + + std::to_string(errno)); + } else { + fd = shm_open(shm_name.c_str(), O_RDWR, S_IRUSR | S_IWUSR); + + if (fd == -1) + TORCH_CHECK(false, "open shm in SHMManager failed. errno: " + + std::to_string(errno)); + } + + void* shm_ptr = mmap(nullptr, shm_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, fd, 0); + + if (shm_ptr == MAP_FAILED) { + TORCH_CHECK(false, + "mmap in SHMManager failed. errno: " + std::to_string(errno)); + } + + TORCH_CHECK((size_t)shm_ptr % 64 == 0) + + return shm_ptr; + } + + void destroy_shm() { + for (int i = 0; i < MAX_SHM_RANK_NUM; ++i) { + if (!_shm_names[i].empty() && _shared_mem_ptrs[i] != nullptr) { + shm_unlink(_shm_names[i].c_str()); + } + } + } + + int _rank; + std::array _shm_names; + std::array _shared_mem_ptrs; + SHMContext* _shm_ctx; +}; + +static std::unique_ptr shm_manager_singleton = nullptr; + +template +void shm_allreduce_sum(SHMContext* ctx, const int rank, scalar_t* data, + size_t elem_num) { + switch (ctx->group_size) { + case 2: + shm_cc_ops::all_reduce_sum_v1(ctx, rank, data, elem_num); + break; + case 4: + shm_cc_ops::all_reduce_sum_v1(ctx, rank, data, elem_num); + break; + case 8: + shm_cc_ops::all_reduce_sum_v2(ctx, rank, data, elem_num); + break; + default: + TORCH_CHECK(false, + "Invalid world size: " + std::to_string(ctx->group_size)); + } +} + +template +void shm_gather_impl(SHMContext* ctx, const int rank, scalar_t* data, + size_t elem_num, scalar_t** outputs, const int dst) { + CPU_KERNEL_GUARD_IN(shm_gather_impl) + const int worldsize = ctx->group_size; + const size_t bytes = elem_num * sizeof(scalar_t); + TORCH_CHECK(bytes <= ctx->rank_buffer_size); + shm_cc_ops::gather(ctx, rank, data, bytes); + + ctx->barrier(RankStat::READY); + if (rank != dst) { + ctx->barrier(RankStat::DONE); + return; + } + const int thread_num = std::max(omp_get_max_threads() / worldsize, 1); + const size_t partition_num = + (bytes + 512 * thread_num - 1) / (512 * thread_num); + +#pragma omp parallel for collapse(2) schedule(static) + for (int target_rank = 0; target_rank < worldsize; ++target_rank) { + for (int i = 0; i < thread_num; ++i) { + size_t offset = i * partition_num * 512; + if (offset < bytes) { + size_t partition_len = std::min(512 * partition_num, bytes - offset); + shm_cc_ops::memcpy_64bytes( + (char*)(outputs[target_rank]) + offset, + (char*)(ctx->rank_ptr(target_rank)) + offset, + partition_len); + } + } + } + + ctx->barrier(RankStat::DONE); + return; +} + +} // namespace + +void shm_gather(torch::Tensor& data, + const std::optional>& outputs, + int64_t dst, int64_t rank) { + TORCH_CHECK(data.is_contiguous()) + VLLM_DISPATCH_FLOATING_TYPES(data.scalar_type(), "shm_gather_impl", [&] { + CPU_KERNEL_GUARD_IN(shm_gather_impl) + + if (outputs.has_value()) { + TORCH_CHECK_LE(outputs->size(), MAX_SHM_RANK_NUM); + scalar_t* output_ptrs[MAX_SHM_RANK_NUM] = {nullptr}; + for (int i = 0; i < outputs->size(); ++i) { + output_ptrs[i] = outputs->at(i).data_ptr(); + } + shm_gather_impl(shm_manager_singleton->get_shm_ctx(), rank, + data.data_ptr(), data.numel(), output_ptrs, + dst); + } else { + shm_gather_impl(shm_manager_singleton->get_shm_ctx(), rank, + data.data_ptr(), data.numel(), (scalar_t**)(0), + dst); + } + + CPU_KERNEL_GUARD_OUT(shm_gather_impl) + }); +} + +void shm_allreduce(torch::Tensor& data, int64_t rank) { + TORCH_CHECK(data.is_contiguous()) + VLLM_DISPATCH_FLOATING_TYPES(data.scalar_type(), 
"shm_allreduce_sum", [&] { + CPU_KERNEL_GUARD_IN(shm_allreduce_sum) + shm_allreduce_sum(shm_manager_singleton->get_shm_ctx(), rank, + data.data_ptr(), data.numel()); + CPU_KERNEL_GUARD_OUT(shm_allreduce_sum) + }); +} + +void init_shm_manager(const std::string& ip_port, const int64_t group_size, + const int64_t rank, const int64_t rank_buffer_size) { + if (shm_manager_singleton == nullptr) { + shm_manager_singleton = std::make_unique( + ip_port, group_size, rank, rank_buffer_size); + } else { + TORCH_CHECK( + false, + "Duplicate initialization of shm_manager_singleton is not allowed.") + } +} + +std::string join_shm_manager(const std::string& ip_port, + const int64_t group_size, const int64_t rank, + const int64_t rank_buffer_size) { + TORCH_CHECK(shm_manager_singleton); + shm_manager_singleton->join(ip_port, group_size, rank, rank_buffer_size); + return shm_manager_singleton->get_shm_ctx()->to_string(); +} \ No newline at end of file diff --git a/csrc/cpu/torch_bindings.cpp b/csrc/cpu/torch_bindings.cpp index 7d549e271a30d..179b75c1fcecd 100644 --- a/csrc/cpu/torch_bindings.cpp +++ b/csrc/cpu/torch_bindings.cpp @@ -6,6 +6,19 @@ void init_cpu_threads_env(const std::string& cpu_ids); +void shm_gather(torch::Tensor& data, + const std::optional>& outputs, + int64_t dst, int64_t rank); + +void shm_allreduce(torch::Tensor& data, int64_t rank); + +void init_shm_manager(const std::string& ip_port, const int64_t group_size, + const int64_t rank, const int64_t rank_buffer_size); + +std::string join_shm_manager(const std::string& ip_port, + const int64_t group_size, const int64_t rank, + const int64_t rank_buffer_size); + TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // vLLM custom ops @@ -84,6 +97,19 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { " Tensor! key, int head_size," " Tensor cos_sin_cache, bool is_neox) -> ()"); ops.impl("rotary_embedding", torch::kCPU, &rotary_embedding); + + // SHM based all-reduce + ops.def( + "init_shm_manager(str ip_port, int group_size, int rank, int " + "rank_buffer_size) -> ()", + &init_shm_manager); + ops.def( + "join_shm_manager(str ip_port, int group_size, int rank, int " + "rank_buffer_size) -> str", + &join_shm_manager); + ops.def("shm_allreduce(Tensor! data, int rank) -> ()"); + ops.impl("shm_allreduce", torch::kCPU, &shm_allreduce); + ops.def("shm_gather", &shm_gather); } TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) { diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index e9c6fc3a255e4..356f833e6c25c 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -297,7 +297,8 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: if (pynccl_comm is not None and not pynccl_comm.disabled): pynccl_comm.all_reduce(input_) else: - torch.distributed.all_reduce(input_, group=self.device_group) + # torch.distributed.all_reduce(input_, group=self.device_group) + torch.ops._C.shm_allreduce(input_, self.rank) return input_ def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor: diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 7251437461583..6511ac6b1b1d7 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -61,6 +61,7 @@ def _init_executor(self) -> None: # 127.0.0.1 for communication. 
ip = "127.0.0.1" port = get_open_port() + self.ip_port = ip + "_" + str(port) self.distributed_init_method = get_distributed_init_method(ip, port) is_async = isinstance(self, CPUExecutorAsync) @@ -111,6 +112,10 @@ def _init_executor(self) -> None: self._run_workers("init_device") self._run_workers("load_model") + if world_size > 1: + self._run_workers("init_shm_manager") + self._run_workers("join_shm_manager") + def _create_worker( self, local_rank: int = 0, @@ -135,6 +140,7 @@ def _create_worker( load_config=self.load_config, local_rank=local_rank, rank=rank, + ip_port = self.ip_port, distributed_init_method=self.distributed_init_method, lora_config=self.lora_config, multimodal_config=self.multimodal_config, diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 3d0e1daa52113..a2af80af9fe12 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -11,7 +11,7 @@ ModelConfig, MultiModalConfig, ParallelConfig, PromptAdapterConfig, SchedulerConfig) from vllm.distributed import (ensure_model_parallel_initialized, - init_distributed_environment) + init_distributed_environment, parallel_state) from vllm.logger import init_logger from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest @@ -131,6 +131,7 @@ def __init__( load_config: LoadConfig, local_rank: int, rank: int, + ip_port: str, distributed_init_method: str, lora_config: Optional[LoRAConfig] = None, multimodal_config: Optional[MultiModalConfig] = None, @@ -146,6 +147,7 @@ def __init__( self.load_config = load_config self.local_rank = local_rank self.rank = rank + self.ip_port = ip_port self.distributed_init_method = distributed_init_method self.lora_config = lora_config self.prompt_adapter_config = prompt_adapter_config @@ -340,3 +342,33 @@ def get_cache_block_size_bytes(self) -> int: return CPUCacheEngine.get_cache_block_size( self.cache_config.block_size, self.cache_config.cache_dtype, self.model_config, self.parallel_config) + + def init_shm_manager(self): + elem_size = torch.tensor([], + dtype=self.model_config.dtype).element_size() + world_size = parallel_state.get_tensor_model_parallel_world_size() + hidden_size = self.model_config.get_hidden_size() + rank_buffer_size = (self.model_config.max_model_len * hidden_size * + 5 // world_size * elem_size) + torch.ops._C.init_shm_manager( + self.ip_port, + parallel_state.get_tensor_model_parallel_world_size(), + parallel_state.get_tensor_model_parallel_rank(), + rank_buffer_size, + ) + + def join_shm_manager(self): + elem_size = torch.tensor([], + dtype=self.model_config.dtype).element_size() + world_size = parallel_state.get_tensor_model_parallel_world_size() + hidden_size = self.model_config.get_hidden_size() + rank_buffer_size = (self.model_config.max_model_len * hidden_size * + 5 // world_size * elem_size) + ret = torch.ops._C.join_shm_manager( + self.ip_port, + parallel_state.get_tensor_model_parallel_world_size(), + parallel_state.get_tensor_model_parallel_rank(), + rank_buffer_size, + ) + print("rank: ", parallel_state.get_tensor_model_parallel_rank()) + print(ret) From f1fbb24feab7b754a0a821126a8145b954ebbb1b Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 12 Jul 2024 06:27:57 +0000 Subject: [PATCH 22/36] Revert "Add shm op" This reverts commit 72e2c10429e7a5446eb28ae63fa438003dbd1842. 
--- Makefile | 77 ----- cmake/cpu_extension.cmake | 1 - csrc/cpu/cpu_types_x86.hpp | 11 - csrc/cpu/shm_ccl.cpp | 516 ----------------------------- csrc/cpu/torch_bindings.cpp | 26 -- vllm/distributed/parallel_state.py | 3 +- vllm/executor/cpu_executor.py | 6 - vllm/worker/cpu_worker.py | 34 +- 8 files changed, 2 insertions(+), 672 deletions(-) delete mode 100644 Makefile delete mode 100644 csrc/cpu/shm_ccl.cpp diff --git a/Makefile b/Makefile deleted file mode 100644 index e56042a426d8c..0000000000000 --- a/Makefile +++ /dev/null @@ -1,77 +0,0 @@ -.PHONY: clean build - -install_dnnl: - git clone -b rls-v3.5 https://github.com/oneapi-src/oneDNN.git - cd oneDNN && mkdir build && \ - cmake -B build -G Ninja -DONEDNN_LIBRARY_TYPE=STATIC -DONEDNN_BUILD_DOC=OFF -DONEDNN_BUILD_EXAMPLES=OFF -DONEDNN_BUILD_TESTS=OFF -DONEDNN_BUILD_GRAPH=OFF -DONEDNN_ENABLE_WORKLOAD=INFERENCE -DONEDNN_ENABLE_PRIMITIVE=MATMUL && \ - cmake --build build --target install --config Release - -install_deps: - pip install wheel packaging ninja setuptools>=49.4.0 numpy - pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu - -install: - VLLM_TARGET_DEVICE=cpu pip install --no-build-isolation -v -e . - -VLLM_TP_2S_bench: - cd benchmarks && VLLM_CPU_OMP_THREADS_BIND="0-23|24-47" VLLM_CPU_KVCACHE_SPACE=40 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so python3 benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=100 --dtype=bfloat16 --trust-remote-code --device=cpu -tp=2 - -VLLM_2S_offline: - ray stop - OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=32-63 --membind=1 ray start --head --num-cpus=32 --num-gpus=0 - cd examples && OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=0-31 --membind=0 python3 offline_inference.py - -VLLM_TP_4S_bench: - cd benchmarks && VLLM_CPU_OMP_THREADS_BIND="0-31|32-63|64-95|96-127" VLLM_CPU_KVCACHE_SPACE=40 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so python3 benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=1000 --dtype=bfloat16 --trust-remote-code --device=cpu -tp=4 - -VLLM_4S_offline: - ray stop - OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=32-63 --membind=1 ray start --head --num-cpus=32 --num-gpus=0 - OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=64-95 --membind=2 ray start --address=auto --num-cpus=32 --num-gpus=0 - OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=96-127 --membind=3 ray start --address=auto --num-cpus=32 --num-gpus=0 - cd examples && OMP_DISPLAY_ENV=VERBOSE VLLM_CPU_KVCACHE_SPACE=40 OMP_PROC_BIND=close numactl --physcpubind=0-31 --membind=0 python3 offline_inference.py - -HF_TP_bench: - cd benchmarks && python benchmark_throughput.py --backend=hf --dataset=../ShareGPT_V3_unfiltered_cleaned_split.json --model=/root/frameworks.bigdata.dev-ops/vicuna-7b-v1.5/ --n=1 --num-prompts=1 --hf-max-batch-size=1 --trust-remote-code --device=cpu - -VLLM_TP_bench: - cd benchmarks && \ - VLLM_CPU_OMP_THREADS_BIND="0-47" \ - VLLM_CPU_KVCACHE_SPACE=100 \ - TORCH_LOGS="recompiles" \ - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ - python3 
benchmark_throughput.py --backend=vllm --dataset=./ShareGPT_V3_unfiltered_cleaned_split.json --model=lmsys/vicuna-7b-v1.5 --n=1 --num-prompts=1000 --dtype=bfloat16 --trust-remote-code --device=cpu - -VLLM_TP_bench_slm: - cd benchmarks && \ - VLLM_CPU_OMP_THREADS_BIND="0-47" \ - VLLM_CPU_KVCACHE_SPACE=100 \ - TORCH_LOGS="recompiles" \ - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ - python3 benchmark_throughput.py --backend=vllm --model=facebook/opt-125m --n=1 --num-prompts=1000 --input-len=128 --output-len=128 --dtype=bfloat16 --trust-remote-code --device=cpu - -VLLM_LT_bench: - cd benchmarks && \ - VLLM_CPU_OMP_THREADS_BIND="0-47" \ - VLLM_CPU_KVCACHE_SPACE=100 \ - TORCH_LOGS="recompiles" \ - LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 \ - python3 benchmark_latency.py --model=facebook/opt-125m --n=1 --batch-size=32 --input-len=1024 --output-len=1024 --num-iters-warmup=1 --num-iters=3 --dtype=bfloat16 --trust-remote-code --device=cpu - -VLLM_SERVE_bench: - cd benchmarks && python -m vllm.entrypoints.api_server \ - --model /root/HF_models/vicuna-7b-v1.5/ --swap-space 40 \ - --disable-log-requests --dtype=bfloat16 --device cpu & \ - cd benchmarks && sleep 30 && python benchmark_serving.py \ - --backend vllm \ - --tokenizer /root/HF_models/vicuna-7b-v1.5/ --dataset /root/HF_models/ShareGPT_V3_unfiltered_cleaned_split.json \ - --request-rate 10 - -VLLM_Serve: - cd benchmarks && VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-47" LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 python3 -m vllm.entrypoints.openai.api_server --model lmsys/vicuna-7b-v1.5 --dtype=bfloat16 --device cpu - -VLLM_2S_Serve: - cd benchmarks && VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-23|24-47" LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4 python3 -m vllm.entrypoints.openai.api_server --model lmsys/vicuna-7b-v1.5 --dtype=bfloat16 --device cpu -tp=2 - -VLLM_bench_client: - cd benchmarks && python3 benchmark_serving.py --backend vllm --model lmsys/vicuna-7b-v1.5 --tokenizer lmsys/vicuna-7b-v1.5 --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json --request-rate 4 --num-prompts 1000 \ No newline at end of file diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index b19c1ce21e20a..118f9b28e0ae3 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -98,7 +98,6 @@ set(VLLM_EXT_SRC "csrc/cpu/attention.cpp" "csrc/cpu/cache.cpp" "csrc/cpu/utils.cpp" - "csrc/cpu/shm_ccl.cpp" "csrc/cpu/layernorm.cpp" "csrc/cpu/pos_encoding.cpp" "csrc/cpu/torch_bindings.cpp") diff --git a/csrc/cpu/cpu_types_x86.hpp b/csrc/cpu/cpu_types_x86.hpp index 192df7f2e63d4..f50620a5287d4 100644 --- a/csrc/cpu/cpu_types_x86.hpp +++ b/csrc/cpu/cpu_types_x86.hpp @@ -510,17 +510,6 @@ inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) { inline void prefetch(const void *addr) { _mm_prefetch(addr, _MM_HINT_T1); } -inline void non_temporal_save(BF16Vec32 &vec, void *ptr) { - _mm512_stream_si512((__m512i *)ptr, vec.reg); -} - -inline void non_temporal_save(BF16Vec16 &vec, void *ptr) { - _mm256_stream_si256((__m256i *)ptr, vec.reg); -} - -inline void non_temporal_save(FP32Vec16 &vec, void *ptr) { - _mm512_stream_ps((float *)ptr, vec.reg); -} }; // namespace vec_op #endif diff --git a/csrc/cpu/shm_ccl.cpp b/csrc/cpu/shm_ccl.cpp deleted file mode 100644 index b34481b13ba83..0000000000000 --- a/csrc/cpu/shm_ccl.cpp +++ /dev/null @@ -1,516 +0,0 @@ -#include "cpu/cpu_types.hpp" - -#include -#include -#include -#include - -namespace { -#define 
MAX_SHM_RANK_NUM 8 - -template -struct KernelVecType { - using scalar_vec_t = void; -}; - -template <> -struct KernelVecType { - using scalar_vec_t = vec_op::FP32Vec16; -}; - -template <> -struct KernelVecType { - using scalar_vec_t = vec_op::BF16Vec16; -}; - -enum class RankStat : char { READY = 0, EXECUTE, DONE }; - -struct SHMContext { - volatile RankStat rank_stat; - char _padding1[60]; - int rank; - int group_size; - size_t rank_buffer_size; - SHMContext* shm_contexts[MAX_SHM_RANK_NUM]; - char _padding2[48]; - - SHMContext(const int rank, const int group_size, - const size_t rank_buffer_size) - : rank(rank), group_size(group_size), rank_buffer_size(rank_buffer_size) { - static_assert(sizeof(SHMContext) % 64 == 0); - TORCH_CHECK(group_size <= MAX_SHM_RANK_NUM); - TORCH_CHECK(rank < MAX_SHM_RANK_NUM); - TORCH_CHECK((size_t)this % 64 == 0); - for (int i = 0; i < MAX_SHM_RANK_NUM; ++i) { - shm_contexts[i] = nullptr; - } - set_context(rank, this); - rank_stat = RankStat::DONE; - } - - void set_context(int rank, SHMContext* ptr) { - TORCH_CHECK(rank < MAX_SHM_RANK_NUM); - TORCH_CHECK(ptr); - shm_contexts[rank] = ptr; - } - - template - T* rank_ptr(int rank) { - return reinterpret_cast(shm_contexts[rank] + 1); - } - - RankStat get_rank_stat(int rank) const { - return shm_contexts[rank]->rank_stat; - } - - bool is_all_done() { - for (int i = 0; i < group_size; ++i) { - if (shm_contexts[i]->rank_stat != RankStat::DONE) { - return false; - } - } - return true; - } - - bool is_last() const { return rank == (group_size - 1); } - - void set_rank_stat(int rank, RankStat stat) { - shm_contexts[rank]->rank_stat = stat; - } - - void barrier(const RankStat next_stat) { - if (next_stat == RankStat::READY) { - if (is_last()) { - for (int i = 0; i < group_size; ++i) { - set_rank_stat(i, RankStat::READY); - } - } else { - while (get_rank_stat(rank) != RankStat::READY) _mm_pause(); - } - set_rank_stat(rank, RankStat::EXECUTE); - } else if (next_stat == RankStat::DONE) { - set_rank_stat(rank, RankStat::DONE); - if (is_last()) { - while (!is_all_done()) _mm_pause(); - } - } else { - TORCH_CHECK(false, "Invalid next_stat to barrier."); - } - } - - std::string to_string() const { - std::stringstream ss; - ss << "SHMContext: \nrank_stat: "; - switch (rank_stat) { - case RankStat::READY: - ss << "READY, "; - break; - case RankStat::EXECUTE: - ss << "EXECUTE, "; - break; - case RankStat::DONE: - ss << "DONE, "; - break; - default: - TORCH_CHECK(false, "Invalid RankStat type."); - } - ss << "\nrank: " << rank; - ss << "\ngroup_size: " << group_size; - ss << "\nrank_buffer_size: " << rank_buffer_size; - ss << "\nshm_contexts: ["; - - for (int i = 0; i < group_size; ++i) { - ss << shm_contexts[i]->rank << ", "; - } - ss << "]"; - - return ss.str(); - } -}; - -namespace shm_cc_ops { - -void memcpy_64bytes(void* dst, void* src, size_t len) { - constexpr size_t align_len = 64; - TORCH_CHECK(len % align_len == 0); - TORCH_CHECK((size_t)dst % align_len == 0); - TORCH_CHECK((size_t)src % align_len == 0); -#pragma GCC unroll 4 - for (size_t i = 0; i < len; i += align_len) { - vec_op::BF16Vec32 data((char*)src + i); - vec_op::non_temporal_save(data, (char*)dst + i); - } -} - -void parallel_memcpy(void* dst, void* src, size_t len) { - int thread_num = omp_get_max_threads(); - const size_t partition_num = - (len + 512 * thread_num - 1) / (512 * thread_num); - -#pragma omp parallel for schedule(static, 1) - for (int i = 0; i < thread_num; ++i) { - size_t offset = i * partition_num * 512; - if (offset < len) { - size_t 
partition_len = std::min(512 * partition_num, len - offset); - memcpy_64bytes((char*)dst + offset, (char*)src + offset, partition_len); - } - } -} - -void gather(SHMContext* ctx, int rank, void* data, size_t len) { - CPU_KERNEL_GUARD_IN(gather) - TORCH_CHECK(len <= ctx->rank_buffer_size); - ctx->barrier(RankStat::READY); - parallel_memcpy(ctx->rank_ptr(rank), data, len); - ctx->barrier(RankStat::DONE); -} - -void broadcast(SHMContext* ctx, int rank, void* data, size_t len) { - CPU_KERNEL_GUARD_IN(broatcast) - ctx->barrier(RankStat::READY); - parallel_memcpy(data, ctx->rank_ptr(0), len); - ctx->barrier(RankStat::DONE); -} - -void scatter(SHMContext* ctx, int rank, void* data, size_t len) { - CPU_KERNEL_GUARD_IN(scatter) - ctx->barrier(RankStat::READY); - parallel_memcpy(data, ctx->rank_ptr(rank), len); - ctx->barrier(RankStat::DONE); -} - -template -void all_reduce_sum_v1(SHMContext* ctx, int rank, scalar_t* data, - size_t elem_num) { - CPU_KERNEL_GUARD_IN(all_reduce_sum_v1) - const size_t bytes = elem_num * sizeof(scalar_t); - TORCH_CHECK(bytes <= ctx->rank_buffer_size); - shm_cc_ops::gather(ctx, rank, data, bytes); - using scalar_vec_t = typename KernelVecType::scalar_vec_t; - constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); - constexpr int CACHELINE_SIZE = 64; - constexpr int PACKED_FACTOR = - CACHELINE_SIZE / (sizeof(scalar_t) * VEC_ELEM_NUM); - TORCH_CHECK(elem_num % VEC_ELEM_NUM == 0); - - ctx->barrier(RankStat::READY); - - int thread_num = omp_get_max_threads(); - size_t partition_num = - (elem_num + thread_num * VEC_ELEM_NUM * PACKED_FACTOR - 1) / - (thread_num * VEC_ELEM_NUM * PACKED_FACTOR); -#pragma omp parallel for schedule(static, 1) - for (int i = 0; i < thread_num; ++i) { - size_t offset = i * partition_num * VEC_ELEM_NUM * PACKED_FACTOR; - if (offset < elem_num) { - const size_t partition_len = std::min( - VEC_ELEM_NUM * PACKED_FACTOR * partition_num, elem_num - offset); - scalar_t* rank_ptrs[RANKS]; - vec_op::unroll_loop([&](int idx) { - rank_ptrs[idx] = ctx->rank_ptr(idx) + offset; - TORCH_CHECK((size_t)rank_ptrs[idx] % 64 == 0); - }); - -#pragma GCC unroll 4 - for (int i = 0; i < partition_len; i += VEC_ELEM_NUM) { - size_t curr_offset = i; - scalar_vec_t data_0(rank_ptrs[0] + curr_offset); - vec_op::FP32Vec16 fp32_data_0(data_0); - vec_op::unroll_loop([&](int k) { - scalar_vec_t data_x(rank_ptrs[k + 1] + curr_offset); - vec_op::FP32Vec16 fp32_data_x(data_x); - fp32_data_0 = fp32_data_0 + fp32_data_x; - }); - data_0 = scalar_vec_t(fp32_data_0); - data_0.save(data + offset + curr_offset); - } - } - } - ctx->barrier(RankStat::DONE); -} - -template -void all_reduce_sum_v2(SHMContext* ctx, int rank, scalar_t* data, - size_t elem_num) { - CPU_KERNEL_GUARD_IN(all_reduce_sum_v2) - const size_t bytes = elem_num * sizeof(scalar_t); - TORCH_CHECK(bytes <= ctx->rank_buffer_size); - shm_cc_ops::gather(ctx, rank, data, bytes); - using scalar_vec_t = typename KernelVecType::scalar_vec_t; - constexpr int VEC_ELEM_NUM = scalar_vec_t::get_elem_num(); - constexpr int CACHELINE_SIZE = 64; - constexpr int PACKED_FACTOR = - CACHELINE_SIZE / (sizeof(scalar_t) * VEC_ELEM_NUM); - TORCH_CHECK(elem_num % VEC_ELEM_NUM == 0); - - ctx->barrier(RankStat::READY); - - const int world_size = ctx->group_size; - const size_t rank_partition_num = - (elem_num + world_size * VEC_ELEM_NUM * PACKED_FACTOR - 1) / - (world_size * VEC_ELEM_NUM * PACKED_FACTOR); - const size_t rank_offset = - rank * rank_partition_num * VEC_ELEM_NUM * PACKED_FACTOR; - - if (rank_offset >= elem_num) { - 
ctx->barrier(RankStat::DONE); - return; - } - - const size_t rank_elem_num = - std::min(VEC_ELEM_NUM * PACKED_FACTOR * rank_partition_num, - elem_num - rank_offset); - - int thread_num = omp_get_max_threads(); - size_t partition_num = - (rank_elem_num + thread_num * VEC_ELEM_NUM * PACKED_FACTOR - 1) / - (thread_num * VEC_ELEM_NUM * PACKED_FACTOR); - -#pragma omp parallel for schedule(static, 1) - for (int i = 0; i < thread_num; ++i) { - size_t offset = i * partition_num * VEC_ELEM_NUM * PACKED_FACTOR; - if (offset < rank_elem_num) { - const size_t partition_len = std::min( - VEC_ELEM_NUM * PACKED_FACTOR * partition_num, rank_elem_num - offset); - scalar_t* rank_ptrs[RANKS]; - vec_op::unroll_loop([&](int idx) { - rank_ptrs[idx] = ctx->rank_ptr(idx) + rank_offset + offset; - TORCH_CHECK((size_t)rank_ptrs[idx] % 64 == 0); - }); - -#pragma GCC unroll 4 - for (int i = 0; i < partition_len; i += VEC_ELEM_NUM) { - size_t curr_offset = i; - scalar_vec_t data_0(rank_ptrs[0] + curr_offset); - vec_op::FP32Vec16 fp32_data_0(data_0); - vec_op::unroll_loop([&](int k) { - scalar_vec_t data_x(rank_ptrs[k + 1] + curr_offset); - vec_op::FP32Vec16 fp32_data_x(data_x); - fp32_data_0 = fp32_data_0 + fp32_data_x; - }); - data_0 = scalar_vec_t(fp32_data_0); - vec_op::unroll_loop([&](int k) { - vec_op::non_temporal_save(data_0, rank_ptrs[k] + curr_offset); - }); - } - } - } - ctx->barrier(RankStat::DONE); - - shm_cc_ops::scatter(ctx, rank, data, bytes); -} -}; // namespace shm_cc_ops - -class SHMManager { - public: - explicit SHMManager(const std::string& ip_port, const int group_size, - const int rank, const size_t rank_buffer_size) - : _rank(rank), - _shm_names({""}), - _shared_mem_ptrs({nullptr}), - _shm_ctx(nullptr) { - _shm_names[rank] = get_shm_name(ip_port, rank); - _shared_mem_ptrs[rank] = init_shm(rank, rank_buffer_size); - - _shm_ctx = new (_shared_mem_ptrs[rank]) - SHMContext(rank, group_size, round_size(rank_buffer_size)); - } - - void join(const std::string& ip_port, const int group_size, const int rank, - const size_t rank_buffer_size) { - TORCH_CHECK(rank == _rank); - SHMContext* ctx = get_shm_ctx(); - for (int i = 0; i < group_size; ++i) { - if (i != rank) { - TORCH_CHECK(_shm_names[i].empty()); - TORCH_CHECK(_shared_mem_ptrs[i] == nullptr); - - _shm_names[i] = get_shm_name(ip_port, i); - _shared_mem_ptrs[i] = init_shm(i, rank_buffer_size); - ctx->set_context(i, (SHMContext*)_shared_mem_ptrs[i]); - } - } - } - - ~SHMManager() { destroy_shm(); } - - SHMContext* get_shm_ctx() const { - return reinterpret_cast(_shared_mem_ptrs[_rank]); - } - - static std::string get_shm_name(const std::string& ip_port, int rank) { - return "/vllm_" + ip_port + "_" + std::to_string(rank); - } - - private: - static size_t round_size(const size_t size) { - return ((size + 63) >> 6) << 6; - } - - void* init_shm(int target_rank, const size_t rank_buffer_size) { - const std::string& shm_name = _shm_names[target_rank]; - const int local_rank = _rank; - const size_t rounded_rank_buffer_size = round_size(rank_buffer_size); - const size_t shm_size = sizeof(SHMContext) + rounded_rank_buffer_size; - - int fd = -1; - if (local_rank == target_rank) { - fd = shm_open(shm_name.c_str(), O_CREAT | O_EXCL | O_RDWR, - S_IRUSR | S_IWUSR); - - if (fd == -1) - TORCH_CHECK(false, "create shm in SHMManager failed. errno: " + - std::to_string(errno)); - - if (ftruncate(fd, shm_size) == -1) - TORCH_CHECK(false, "ftruncate in SHMManager failed. 
errno: " + - std::to_string(errno)); - } else { - fd = shm_open(shm_name.c_str(), O_RDWR, S_IRUSR | S_IWUSR); - - if (fd == -1) - TORCH_CHECK(false, "open shm in SHMManager failed. errno: " + - std::to_string(errno)); - } - - void* shm_ptr = mmap(nullptr, shm_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_POPULATE, fd, 0); - - if (shm_ptr == MAP_FAILED) { - TORCH_CHECK(false, - "mmap in SHMManager failed. errno: " + std::to_string(errno)); - } - - TORCH_CHECK((size_t)shm_ptr % 64 == 0) - - return shm_ptr; - } - - void destroy_shm() { - for (int i = 0; i < MAX_SHM_RANK_NUM; ++i) { - if (!_shm_names[i].empty() && _shared_mem_ptrs[i] != nullptr) { - shm_unlink(_shm_names[i].c_str()); - } - } - } - - int _rank; - std::array _shm_names; - std::array _shared_mem_ptrs; - SHMContext* _shm_ctx; -}; - -static std::unique_ptr shm_manager_singleton = nullptr; - -template -void shm_allreduce_sum(SHMContext* ctx, const int rank, scalar_t* data, - size_t elem_num) { - switch (ctx->group_size) { - case 2: - shm_cc_ops::all_reduce_sum_v1(ctx, rank, data, elem_num); - break; - case 4: - shm_cc_ops::all_reduce_sum_v1(ctx, rank, data, elem_num); - break; - case 8: - shm_cc_ops::all_reduce_sum_v2(ctx, rank, data, elem_num); - break; - default: - TORCH_CHECK(false, - "Invalid world size: " + std::to_string(ctx->group_size)); - } -} - -template -void shm_gather_impl(SHMContext* ctx, const int rank, scalar_t* data, - size_t elem_num, scalar_t** outputs, const int dst) { - CPU_KERNEL_GUARD_IN(shm_gather_impl) - const int worldsize = ctx->group_size; - const size_t bytes = elem_num * sizeof(scalar_t); - TORCH_CHECK(bytes <= ctx->rank_buffer_size); - shm_cc_ops::gather(ctx, rank, data, bytes); - - ctx->barrier(RankStat::READY); - if (rank != dst) { - ctx->barrier(RankStat::DONE); - return; - } - const int thread_num = std::max(omp_get_max_threads() / worldsize, 1); - const size_t partition_num = - (bytes + 512 * thread_num - 1) / (512 * thread_num); - -#pragma omp parallel for collapse(2) schedule(static) - for (int target_rank = 0; target_rank < worldsize; ++target_rank) { - for (int i = 0; i < thread_num; ++i) { - size_t offset = i * partition_num * 512; - if (offset < bytes) { - size_t partition_len = std::min(512 * partition_num, bytes - offset); - shm_cc_ops::memcpy_64bytes( - (char*)(outputs[target_rank]) + offset, - (char*)(ctx->rank_ptr(target_rank)) + offset, - partition_len); - } - } - } - - ctx->barrier(RankStat::DONE); - return; -} - -} // namespace - -void shm_gather(torch::Tensor& data, - const std::optional>& outputs, - int64_t dst, int64_t rank) { - TORCH_CHECK(data.is_contiguous()) - VLLM_DISPATCH_FLOATING_TYPES(data.scalar_type(), "shm_gather_impl", [&] { - CPU_KERNEL_GUARD_IN(shm_gather_impl) - - if (outputs.has_value()) { - TORCH_CHECK_LE(outputs->size(), MAX_SHM_RANK_NUM); - scalar_t* output_ptrs[MAX_SHM_RANK_NUM] = {nullptr}; - for (int i = 0; i < outputs->size(); ++i) { - output_ptrs[i] = outputs->at(i).data_ptr(); - } - shm_gather_impl(shm_manager_singleton->get_shm_ctx(), rank, - data.data_ptr(), data.numel(), output_ptrs, - dst); - } else { - shm_gather_impl(shm_manager_singleton->get_shm_ctx(), rank, - data.data_ptr(), data.numel(), (scalar_t**)(0), - dst); - } - - CPU_KERNEL_GUARD_OUT(shm_gather_impl) - }); -} - -void shm_allreduce(torch::Tensor& data, int64_t rank) { - TORCH_CHECK(data.is_contiguous()) - VLLM_DISPATCH_FLOATING_TYPES(data.scalar_type(), "shm_allreduce_sum", [&] { - CPU_KERNEL_GUARD_IN(shm_allreduce_sum) - shm_allreduce_sum(shm_manager_singleton->get_shm_ctx(), rank, - 
data.data_ptr(), data.numel()); - CPU_KERNEL_GUARD_OUT(shm_allreduce_sum) - }); -} - -void init_shm_manager(const std::string& ip_port, const int64_t group_size, - const int64_t rank, const int64_t rank_buffer_size) { - if (shm_manager_singleton == nullptr) { - shm_manager_singleton = std::make_unique( - ip_port, group_size, rank, rank_buffer_size); - } else { - TORCH_CHECK( - false, - "Duplicate initialization of shm_manager_singleton is not allowed.") - } -} - -std::string join_shm_manager(const std::string& ip_port, - const int64_t group_size, const int64_t rank, - const int64_t rank_buffer_size) { - TORCH_CHECK(shm_manager_singleton); - shm_manager_singleton->join(ip_port, group_size, rank, rank_buffer_size); - return shm_manager_singleton->get_shm_ctx()->to_string(); -} \ No newline at end of file diff --git a/csrc/cpu/torch_bindings.cpp b/csrc/cpu/torch_bindings.cpp index 179b75c1fcecd..7d549e271a30d 100644 --- a/csrc/cpu/torch_bindings.cpp +++ b/csrc/cpu/torch_bindings.cpp @@ -6,19 +6,6 @@ void init_cpu_threads_env(const std::string& cpu_ids); -void shm_gather(torch::Tensor& data, - const std::optional>& outputs, - int64_t dst, int64_t rank); - -void shm_allreduce(torch::Tensor& data, int64_t rank); - -void init_shm_manager(const std::string& ip_port, const int64_t group_size, - const int64_t rank, const int64_t rank_buffer_size); - -std::string join_shm_manager(const std::string& ip_port, - const int64_t group_size, const int64_t rank, - const int64_t rank_buffer_size); - TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // vLLM custom ops @@ -97,19 +84,6 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { " Tensor! key, int head_size," " Tensor cos_sin_cache, bool is_neox) -> ()"); ops.impl("rotary_embedding", torch::kCPU, &rotary_embedding); - - // SHM based all-reduce - ops.def( - "init_shm_manager(str ip_port, int group_size, int rank, int " - "rank_buffer_size) -> ()", - &init_shm_manager); - ops.def( - "join_shm_manager(str ip_port, int group_size, int rank, int " - "rank_buffer_size) -> str", - &join_shm_manager); - ops.def("shm_allreduce(Tensor! data, int rank) -> ()"); - ops.impl("shm_allreduce", torch::kCPU, &shm_allreduce); - ops.def("shm_gather", &shm_gather); } TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) { diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 356f833e6c25c..e9c6fc3a255e4 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -297,8 +297,7 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: if (pynccl_comm is not None and not pynccl_comm.disabled): pynccl_comm.all_reduce(input_) else: - # torch.distributed.all_reduce(input_, group=self.device_group) - torch.ops._C.shm_allreduce(input_, self.rank) + torch.distributed.all_reduce(input_, group=self.device_group) return input_ def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor: diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 6511ac6b1b1d7..7251437461583 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -61,7 +61,6 @@ def _init_executor(self) -> None: # 127.0.0.1 for communication. 
ip = "127.0.0.1" port = get_open_port() - self.ip_port = ip + "_" + str(port) self.distributed_init_method = get_distributed_init_method(ip, port) is_async = isinstance(self, CPUExecutorAsync) @@ -112,10 +111,6 @@ def _init_executor(self) -> None: self._run_workers("init_device") self._run_workers("load_model") - if world_size > 1: - self._run_workers("init_shm_manager") - self._run_workers("join_shm_manager") - def _create_worker( self, local_rank: int = 0, @@ -140,7 +135,6 @@ def _create_worker( load_config=self.load_config, local_rank=local_rank, rank=rank, - ip_port = self.ip_port, distributed_init_method=self.distributed_init_method, lora_config=self.lora_config, multimodal_config=self.multimodal_config, diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index a2af80af9fe12..3d0e1daa52113 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -11,7 +11,7 @@ ModelConfig, MultiModalConfig, ParallelConfig, PromptAdapterConfig, SchedulerConfig) from vllm.distributed import (ensure_model_parallel_initialized, - init_distributed_environment, parallel_state) + init_distributed_environment) from vllm.logger import init_logger from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest @@ -131,7 +131,6 @@ def __init__( load_config: LoadConfig, local_rank: int, rank: int, - ip_port: str, distributed_init_method: str, lora_config: Optional[LoRAConfig] = None, multimodal_config: Optional[MultiModalConfig] = None, @@ -147,7 +146,6 @@ def __init__( self.load_config = load_config self.local_rank = local_rank self.rank = rank - self.ip_port = ip_port self.distributed_init_method = distributed_init_method self.lora_config = lora_config self.prompt_adapter_config = prompt_adapter_config @@ -342,33 +340,3 @@ def get_cache_block_size_bytes(self) -> int: return CPUCacheEngine.get_cache_block_size( self.cache_config.block_size, self.cache_config.cache_dtype, self.model_config, self.parallel_config) - - def init_shm_manager(self): - elem_size = torch.tensor([], - dtype=self.model_config.dtype).element_size() - world_size = parallel_state.get_tensor_model_parallel_world_size() - hidden_size = self.model_config.get_hidden_size() - rank_buffer_size = (self.model_config.max_model_len * hidden_size * - 5 // world_size * elem_size) - torch.ops._C.init_shm_manager( - self.ip_port, - parallel_state.get_tensor_model_parallel_world_size(), - parallel_state.get_tensor_model_parallel_rank(), - rank_buffer_size, - ) - - def join_shm_manager(self): - elem_size = torch.tensor([], - dtype=self.model_config.dtype).element_size() - world_size = parallel_state.get_tensor_model_parallel_world_size() - hidden_size = self.model_config.get_hidden_size() - rank_buffer_size = (self.model_config.max_model_len * hidden_size * - 5 // world_size * elem_size) - ret = torch.ops._C.join_shm_manager( - self.ip_port, - parallel_state.get_tensor_model_parallel_world_size(), - parallel_state.get_tensor_model_parallel_rank(), - rank_buffer_size, - ) - print("rank: ", parallel_state.get_tensor_model_parallel_rank()) - print(ret) From 4a13516aed6abc7c280e4df260a6f936c0bb269b Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 12 Jul 2024 06:49:53 +0000 Subject: [PATCH 23/36] Add IPEX Allreduce --- Dockerfile.cpu | 5 ++-- csrc/cpu/utils.cpp | 26 +++++++++---------- .../getting_started/cpu-installation.rst | 2 -- requirements-cpu.txt | 4 +-- vllm/distributed/parallel_state.py | 3 +++ vllm/executor/cpu_executor.py | 26 +++++++++++++++---- vllm/worker/cpu_worker.py | 2 -- 7 files 
changed, 41 insertions(+), 27 deletions(-) diff --git a/Dockerfile.cpu b/Dockerfile.cpu index 10729049b8690..c13ebb6af6118 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -13,10 +13,9 @@ RUN pip install intel-openmp ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so:$LD_PRELOAD" - RUN echo 'ulimit -c 0' >> ~/.bashrc -RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/cpu/intel_extension_for_pytorch-2.3.100%2Bgit0eb3473-cp310-cp310-linux_x86_64.whl +RUN pip install --proxy http://child-prc.intel.com:913 http://mlpc.intel.com/downloads/cpu/ipex-2.4/rc0/intel_extension_for_pytorch-2.4.0-cp310-cp310-manylinux2014_x86_64.whl RUN pip install --upgrade pip \ && pip install wheel packaging ninja "setuptools>=49.4.0" numpy @@ -27,7 +26,7 @@ COPY ./ /workspace/vllm WORKDIR /workspace/vllm -RUN pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu +RUN pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/test/cpu # Support for building with non-AVX512 vLLM: docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" ... ARG VLLM_CPU_DISABLE_AVX512 diff --git a/csrc/cpu/utils.cpp b/csrc/cpu/utils.cpp index e8471230022d5..a2495b34de1c4 100644 --- a/csrc/cpu/utils.cpp +++ b/csrc/cpu/utils.cpp @@ -47,19 +47,19 @@ void init_cpu_threads_env(const std::string& cpu_ids) { } // OMP threads binding - omp_set_num_threads((int)omp_cpu_ids.size()); - torch::set_num_threads((int)omp_cpu_ids.size()); - TORCH_CHECK_EQ(omp_cpu_ids.size(), torch::get_num_threads()); - TORCH_CHECK_EQ(omp_cpu_ids.size(), omp_get_max_threads()); - #pragma omp parallel for schedule(static, 1) - for (size_t i = 0; i < omp_cpu_ids.size(); ++i) { - cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size); - size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size); - CPU_ZERO_S(size, mask); - CPU_SET_S(omp_cpu_ids[i], size, mask); - sched_setaffinity(0, sizeof(cpu_set_t), mask); - CPU_FREE(mask); - } + omp_set_num_threads((int)omp_cpu_ids.size()); + torch::set_num_threads((int)omp_cpu_ids.size()); + TORCH_CHECK_EQ(omp_cpu_ids.size(), torch::get_num_threads()); + TORCH_CHECK_EQ(omp_cpu_ids.size(), omp_get_max_threads()); +#pragma omp parallel for schedule(static, 1) + for (size_t i = 0; i < omp_cpu_ids.size(); ++i) { + cpu_set_t* mask = CPU_ALLOC(omp_cpu_mask->size); + size_t size = CPU_ALLOC_SIZE(omp_cpu_mask->size); + CPU_ZERO_S(size, mask); + CPU_SET_S(omp_cpu_ids[i], size, mask); + sched_setaffinity(0, sizeof(cpu_set_t), mask); + CPU_FREE(mask); + } numa_free_nodemask(omp_cpu_mask); } \ No newline at end of file diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 0f3155638bbe3..11dce413651ba 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -88,8 +88,6 @@ Intel Extension for PyTorch - `Intel Extension for PyTorch (IPEX) `_ extends PyTorch with up-to-date features optimizations for an extra performance boost on Intel hardware. -- IPEX after the ``2.3.0`` can be enabled in the CPU backend by default if it is installed. - .. 
_cpu_backend_performance_tips: Performance tips diff --git a/requirements-cpu.txt b/requirements-cpu.txt index 754070df21c0a..a8ce104d83290 100644 --- a/requirements-cpu.txt +++ b/requirements-cpu.txt @@ -2,6 +2,6 @@ -r requirements-common.txt # Dependencies for x86_64 CPUs -torch == 2.3.1+cpu; platform_machine != "ppc64le" -torchvision == 0.18.1+cpu; platform_machine != "ppc64le" # required for the image processor of phi3v, this must be updated alongside torch +torch == 2.4.0; platform_machine != "ppc64le" +torchvision; platform_machine != "ppc64le" # required for the image processor of phi3v, this must be updated alongside torch triton >= 2.2.0 # FIXME(woosuk): This is a hack to avoid import error. diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index e9c6fc3a255e4..58cae46d9af27 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -296,6 +296,9 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: pynccl_comm = self.pynccl_comm if (pynccl_comm is not None and not pynccl_comm.disabled): pynccl_comm.all_reduce(input_) + elif input_.is_cpu: + import intel_extension_for_pytorch as ipex + ipex.distributed.all_reduce(input_, group=self.device_group) else: torch.distributed.all_reduce(input_, group=self.device_group) return input_ diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 7251437461583..3229e5ad20afa 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -37,7 +37,7 @@ def _init_executor(self) -> None: # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" - + # Intel OpenMP setting ld_prealod_str = os.getenv("LD_PRELOAD", "") if "libiomp5.so" in ld_prealod_str: @@ -51,6 +51,10 @@ def _init_executor(self) -> None: os.environ['KMP_PLAIN_BARRIER_PATTERN'] = "dist,dist" os.environ['KMP_REDUCTION_BARRIER_PATTERN'] = "dist,dist" + # To hint IPEX uses shared memory based AllReduce + os.environ["LOCAL_WORLD_SIZE"] = str( + self.parallel_config.tensor_parallel_size) + self.model_config = _verify_and_get_model_config(self.model_config) self.cache_config = _verify_and_get_cache_config(self.cache_config) self.scheduler_config = _verify_and_get_scheduler_config( @@ -252,16 +256,28 @@ def list_loras(self) -> Set[int]: def add_prompt_adapter( self, prompt_adapter_request: PromptAdapterRequest) -> bool: - return self.driver_worker.add_prompt_adapter(prompt_adapter_request) + return all( + self._run_workers( + "add_prompt_adapter", + prompt_adapter_request, + )) def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool: - return self.driver_worker.remove_prompt_adapter(prompt_adapter_id) + return all( + self._run_workers( + "remove_prompt_adapter", + prompt_adapter_id, + )) def list_prompt_adapters(self) -> Set[int]: - return self.driver_worker.list_prompt_adapters() + return self.driver_method_invoker(self.driver_worker, + "list_prompt_adapters") def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool: - return self.driver_worker.pin_prompt_adapter(prompt_adapter_id) + return all(self._run_workers( + "pin_prompt_adapter", + prompt_adapter_id, + )) def check_health(self) -> None: """Raises an error if engine is unhealthy.""" diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 3d0e1daa52113..735d48c908d61 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -1,6 +1,5 @@ """A CPU worker class.""" from typing import Dict, List, Optional, Tuple 
-import os import torch import torch.distributed @@ -183,7 +182,6 @@ def __init__( self.cache_engine: List[CPUCacheEngine] self.cpu_cache: List[List[torch.Tensor]] - def init_device(self) -> None: if self.local_omp_cpuid != "all": torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) From ecd2cac45e8bf552fa2123190709e26c75de9027 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Mon, 15 Jul 2024 03:50:04 +0000 Subject: [PATCH 24/36] remove prune --- .buildkite/run-cpu-test.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index f5725a271c2f4..20a7678d31f77 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -2,8 +2,6 @@ # It serves a sanity check for compilation and basic model usage. set -ex -docker image prune -f - # Try building the docker image numactl -C 48-95 -N 1 docker build -t cpu-test -f Dockerfile.cpu . numactl -C 48-95 -N 1 docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . From 6fb8f90954400e6ffcf1d8ce48991e58b6577048 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Tue, 16 Jul 2024 08:34:08 +0000 Subject: [PATCH 25/36] update IPEX --- Dockerfile.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.cpu b/Dockerfile.cpu index c13ebb6af6118..c473ba431e680 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -15,7 +15,7 @@ ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/li RUN echo 'ulimit -c 0' >> ~/.bashrc -RUN pip install --proxy http://child-prc.intel.com:913 http://mlpc.intel.com/downloads/cpu/ipex-2.4/rc0/intel_extension_for_pytorch-2.4.0-cp310-cp310-manylinux2014_x86_64.whl +RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/cpu/intel_extension_for_pytorch-2.4.0%2Bgitfbaa4bc-cp310-cp310-linux_x86_64.whl RUN pip install --upgrade pip \ && pip install wheel packaging ninja "setuptools>=49.4.0" numpy From 43766b5e96e42d21aa6507ecfd692655f715f86d Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 19 Jul 2024 07:37:25 +0000 Subject: [PATCH 26/36] update doc --- .buildkite/run-cpu-test.sh | 4 ++-- docs/source/getting_started/cpu-installation.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 20a7678d31f77..6876af86a3dfa 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -13,9 +13,9 @@ remove_docker_container # Run the image docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ - --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test cpu-test + --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ - --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test-avx2 cpu-test-avx2 + --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2 # offline inference docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 11dce413651ba..1718996a0786f 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ 
b/docs/source/getting_started/cpu-installation.rst @@ -79,7 +79,7 @@ Related runtime environment variables - ``VLLM_CPU_KVCACHE_SPACE``: specify the KV Cache size (e.g, ``VLLM_CPU_KVCACHE_SPACE=40`` means 40 GB space for KV cache), larger setting will allow vLLM running more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. -- ``VLLM_CPU_OMP_THREADS_BIND``: specify the CPU cores dedicated to the OpenMP threads. For example, ``VLLM_CPU_OMP_THREADS_BIND=0-31`` means there will be 32 OpenMP threads bound on 0-31 CPU cores. +- ``VLLM_CPU_OMP_THREADS_BIND``: specify the CPU cores dedicated to the OpenMP threads. For example, ``VLLM_CPU_OMP_THREADS_BIND=0-31`` means there will be 32 OpenMP threads bound on 0-31 CPU cores. ``VLLM_CPU_OMP_THREADS_BIND=0-31|32-63`` means there will be 2 tensor parallel processes, 32 OpenMP threads of rank0 are bound on 0-31 CPU cores, and the OpenMP threads of rank1 are bound on 32-63 CPU cores. .. _ipex_guidance: From eafb75f68aee8b8c72b9f69d58acc8053f19804b Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Mon, 22 Jul 2024 07:00:20 +0000 Subject: [PATCH 27/36] retrigger From cfbeae51f3a0e8081aec5e5ff6019aa66b755209 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Mon, 22 Jul 2024 10:46:44 +0000 Subject: [PATCH 28/36] fix comments --- .buildkite/run-cpu-test.sh | 2 +- .../getting_started/cpu-installation.rst | 33 +++++++++++++++++-- vllm/envs.py | 6 ++-- 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 6876af86a3dfa..5d0cc4e4d1dd2 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -11,7 +11,7 @@ remove_docker_container() { docker rm -f cpu-test cpu-test-avx2 || true; } trap remove_docker_container EXIT remove_docker_container -# Run the image +# Run the image, setting --shm-size=4g for tensor parallel. docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 1718996a0786f..0418e39609c2c 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -108,9 +108,38 @@ Performance tips $ export VLLM_CPU_KVCACHE_SPACE=40 $ export VLLM_CPU_OMP_THREADS_BIND=0-29 - $ python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m + $ vllm serve facebook/opt-125m -- If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU core using ``VLLM_CPU_OMP_THREADS_BIND``. +- If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU core using ``VLLM_CPU_OMP_THREADS_BIND``. On a hyper-threading enabled platform with 16 logical CPU cores / 8 physical CPU cores: + +.. code-block:: console + + $ lscpu -e # check the mapping between logical CPU cores and physical CPU cores + +.. code-block:: console + # The "CPU" column means the logical CPU core IDs, and the "CORE" column means the physical core IDs. On this platform, two logical cores are sharing one physical core. 
+ CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ MHZ + 0 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 + 1 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 + 2 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 + 3 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 + 4 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 + 5 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 + 6 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 + 7 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 + 8 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 + 9 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 + 10 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 + 11 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 + 12 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 + 13 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 + 14 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 + 15 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 + +.. code-block:: console + $ # On this platform, it is recommend to only bind openMP threads on logical CPU cores 0-7 or 8-15 + $ export VLLM_CPU_OMP_THREADS_BIND=0-7 + $ python examples/offline_inference.py - If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores using ``VLLM_CPU_OMP_THREADS_BIND`` to avoid cross NUMA node memory access. diff --git a/vllm/envs.py b/vllm/envs.py index 7fbbe28cb34c8..f06b6d66ea6f4 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -242,13 +242,13 @@ def get_default_config_root(): "VLLM_ATTENTION_BACKEND": lambda: os.getenv("VLLM_ATTENTION_BACKEND", None), - # CPU key-value cache space + # (CPU backend only) CPU key-value cache space. # default is 4GB "VLLM_CPU_KVCACHE_SPACE": lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0")), - # CPU core ids bound by OpenMP threads, e.g., "0-31", "0,1,2", - # "0-31,33". CPU cores of different ranks are separated by '|'. + # (CPU backend only) CPU core ids bound by OpenMP threads, e.g., "0-31", + # "0,1,2", "0-31,33". CPU cores of different ranks are separated by '|'. "VLLM_CPU_OMP_THREADS_BIND": lambda: os.getenv("VLLM_CPU_OMP_THREADS_BIND", "all"), From 3f7384fbb9d6da3a286ae408d5e4d240f4ee3dbc Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Mon, 22 Jul 2024 19:19:37 +0800 Subject: [PATCH 29/36] Update csrc/cpu/utils.cpp Co-authored-by: Woosuk Kwon --- csrc/cpu/utils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/csrc/cpu/utils.cpp b/csrc/cpu/utils.cpp index a2495b34de1c4..5782580baa861 100644 --- a/csrc/cpu/utils.cpp +++ b/csrc/cpu/utils.cpp @@ -62,4 +62,4 @@ void init_cpu_threads_env(const std::string& cpu_ids) { } numa_free_nodemask(omp_cpu_mask); -} \ No newline at end of file +} From cc4b330547dfafae6b82b6b82ef23ecf4dff9981 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Mon, 22 Jul 2024 11:33:14 +0000 Subject: [PATCH 30/36] fix doc --- docs/source/getting_started/cpu-installation.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 0418e39609c2c..7fc469e06844f 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -116,7 +116,6 @@ Performance tips $ lscpu -e # check the mapping between logical CPU cores and physical CPU cores -.. code-block:: console # The "CPU" column means the logical CPU core IDs, and the "CORE" column means the physical core IDs. On this platform, two logical cores are sharing one physical core. 
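For reference, the binding string format documented above (comma-separated core ids or ranges per rank, with ranks separated by ``|``) can be interpreted as in the following sketch. This is an illustrative example only; the ``parse_omp_threads_bind`` helper is hypothetical and is not the parser used inside vLLM.

.. code-block:: python

    import os
    from typing import List

    def parse_omp_threads_bind(value: str) -> List[List[int]]:
        """Return one list of CPU core ids per tensor-parallel rank.

        Ranks are separated by '|'; each rank accepts ranges ("0-31")
        and single core ids ("33"), separated by commas.
        """
        ranks: List[List[int]] = []
        for rank_spec in value.split("|"):
            cores: List[int] = []
            for part in rank_spec.split(","):
                if "-" in part:
                    start, end = part.split("-")
                    cores.extend(range(int(start), int(end) + 1))
                else:
                    cores.append(int(part))
            ranks.append(cores)
        return ranks

    if __name__ == "__main__":
        bind = os.getenv("VLLM_CPU_OMP_THREADS_BIND", "all")
        if bind == "all":
            print("No explicit binding; OpenMP threads may run on any core.")
        else:
            for rank, cores in enumerate(parse_omp_threads_bind(bind)):
                print(f"rank {rank}: {len(cores)} OpenMP threads on cores {cores}")

For example, ``VLLM_CPU_OMP_THREADS_BIND=0-31|32-63`` yields two ranks of 32 cores each, matching a two-way tensor-parallel launch as described in the documentation above.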
CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ MHZ 0 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 @@ -136,8 +135,7 @@ Performance tips 14 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 15 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 -.. code-block:: console - $ # On this platform, it is recommend to only bind openMP threads on logical CPU cores 0-7 or 8-15 + # On this platform, it is recommend to only bind openMP threads on logical CPU cores 0-7 or 8-15 $ export VLLM_CPU_OMP_THREADS_BIND=0-7 $ python examples/offline_inference.py From 3268497526677840cceb07bc2398f5077271e201 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Mon, 22 Jul 2024 13:40:57 +0000 Subject: [PATCH 31/36] retrigger From 63e085c9e4a3683f6d115d88a634fe69f6b0c0fd Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Wed, 24 Jul 2024 02:19:28 +0000 Subject: [PATCH 32/36] retrigger From 7f48a4f63430ec3f5f3fa1cdca59bfaf69d4d896 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Thu, 25 Jul 2024 10:50:59 +0000 Subject: [PATCH 33/36] remove assert --- vllm/engine/async_llm_engine.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 16b7bc64a2849..93cc319f11c42 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -410,8 +410,6 @@ def _get_executor_cls( from vllm.executor.tpu_executor import TPUExecutorAsync executor_class = TPUExecutorAsync elif engine_config.device_config.device_type == "cpu": - assert distributed_executor_backend is None, ( - "Distributed execution is not supported with the CPU backend.") from vllm.executor.cpu_executor import CPUExecutorAsync executor_class = CPUExecutorAsync elif engine_config.device_config.device_type == "openvino": From c87f0aba722bf4bf2f7a4f2c82a63ef1780433f2 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 26 Jul 2024 05:44:48 +0000 Subject: [PATCH 34/36] add time out --- .buildkite/run-cpu-test.sh | 48 +++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 5d0cc4e4d1dd2..e2200352581fe 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -17,26 +17,32 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2 -# offline inference -docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" +function cpu_tests() { + # offline inference + docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" -# Run basic model test -docker exec cpu-test bash -c " - pip install pytest Pillow protobuf - pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported + # Run basic model test + docker exec cpu-test bash -c " + pip install pytest Pillow protobuf + pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported -# online inference -docker exec cpu-test bash -c " - export VLLM_CPU_KVCACHE_SPACE=10 - export VLLM_CPU_OMP_THREADS_BIND=48-92 - python3 -m vllm.entrypoints.openai.api_server --model 
facebook/opt-125m & - wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json - timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 - python3 benchmarks/benchmark_serving.py \ - --backend vllm \ - --dataset-name sharegpt \ - --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ - --model facebook/opt-125m \ - --num-prompts 20 \ - --endpoint /v1/completions \ - --tokenizer facebook/opt-125m" + # online inference + docker exec cpu-test bash -c " + export VLLM_CPU_KVCACHE_SPACE=10 + export VLLM_CPU_OMP_THREADS_BIND=48-92 + python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & + wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 + python3 benchmarks/benchmark_serving.py \ + --backend vllm \ + --dataset-name sharegpt \ + --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ + --model facebook/opt-125m \ + --num-prompts 20 \ + --endpoint /v1/completions \ + --tokenizer facebook/opt-125m" +} + +# All of CPU tests are expected to be finished less than 20 mins. +export -f cpu_tests +timeout 20m cpu_tests \ No newline at end of file From a5a13cdc5273f5042019ed7bcee71aab4f6001de Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 26 Jul 2024 09:08:52 +0000 Subject: [PATCH 35/36] Revert "add time out" This reverts commit c87f0aba722bf4bf2f7a4f2c82a63ef1780433f2. --- .buildkite/run-cpu-test.sh | 48 +++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index e2200352581fe..5d0cc4e4d1dd2 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -17,32 +17,26 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \ --cpuset-mems=1 --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2 -function cpu_tests() { - # offline inference - docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" +# offline inference +docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py" - # Run basic model test - docker exec cpu-test bash -c " - pip install pytest Pillow protobuf - pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported +# Run basic model test +docker exec cpu-test bash -c " + pip install pytest Pillow protobuf + pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported - # online inference - docker exec cpu-test bash -c " - export VLLM_CPU_KVCACHE_SPACE=10 - export VLLM_CPU_OMP_THREADS_BIND=48-92 - python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & - wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json - timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 - python3 benchmarks/benchmark_serving.py \ - --backend vllm \ - --dataset-name sharegpt \ - 
--dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ - --model facebook/opt-125m \ - --num-prompts 20 \ - --endpoint /v1/completions \ - --tokenizer facebook/opt-125m" -} - -# All of CPU tests are expected to be finished less than 20 mins. -export -f cpu_tests -timeout 20m cpu_tests \ No newline at end of file +# online inference +docker exec cpu-test bash -c " + export VLLM_CPU_KVCACHE_SPACE=10 + export VLLM_CPU_OMP_THREADS_BIND=48-92 + python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & + wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 + python3 benchmarks/benchmark_serving.py \ + --backend vllm \ + --dataset-name sharegpt \ + --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ + --model facebook/opt-125m \ + --num-prompts 20 \ + --endpoint /v1/completions \ + --tokenizer facebook/opt-125m" From aceded908c3a6cdcd1d7e7167278e5192de394d1 Mon Sep 17 00:00:00 2001 From: "jiang1.li" Date: Fri, 26 Jul 2024 09:13:30 +0000 Subject: [PATCH 36/36] using random dataset --- .buildkite/run-cpu-test.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 5d0cc4e4d1dd2..21deec2bba973 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -30,12 +30,10 @@ docker exec cpu-test bash -c " export VLLM_CPU_KVCACHE_SPACE=10 export VLLM_CPU_OMP_THREADS_BIND=48-92 python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & - wget -q https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 python3 benchmarks/benchmark_serving.py \ --backend vllm \ - --dataset-name sharegpt \ - --dataset ./ShareGPT_V3_unfiltered_cleaned_split.json \ + --dataset-name random \ --model facebook/opt-125m \ --num-prompts 20 \ --endpoint /v1/completions \