From 8549c82660cfa59a13cccd622f8afcc29cbd4281 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 27 Oct 2024 00:19:28 -0700
Subject: [PATCH 01/38] [core] cudagraph output with tensor weak reference
(#9724)
Signed-off-by: youkaichao
---
csrc/ops.h | 24 +++++++++++++++++++++
csrc/torch_bindings.cpp | 3 +++
vllm/utils.py | 9 ++++++++
vllm/worker/model_runner.py | 42 +++++++++++++------------------------
4 files changed, 50 insertions(+), 28 deletions(-)
diff --git a/csrc/ops.h b/csrc/ops.h
index f737f50c2ec96..c50eb39a3dacc 100644
--- a/csrc/ops.h
+++ b/csrc/ops.h
@@ -5,6 +5,30 @@
#include "core/scalar_type.hpp"
+#include <vector>
+
+torch::Tensor weak_ref_tensor(torch::Tensor& tensor) {
+ // Ensure tensor is on CUDA
+ if (!tensor.is_cuda()) {
+ throw std::runtime_error("Tensor must be on CUDA device");
+ }
+
+ // Get the raw data pointer
+ void* data_ptr = tensor.data_ptr();
+
+ // Get tensor sizes and strides
+ std::vector<int64_t> sizes = tensor.sizes().vec();
+ std::vector<int64_t> strides = tensor.strides().vec();
+
+ // Get tensor options (dtype, device)
+ auto options = tensor.options();
+
+ // Create a new tensor from the raw data pointer
+ auto new_tensor = torch::from_blob(data_ptr, sizes, strides, options);
+
+ return new_tensor;
+}
+
void paged_attention_v1(
torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp
index e704ff629fd6e..b8185c24d5628 100644
--- a/csrc/torch_bindings.cpp
+++ b/csrc/torch_bindings.cpp
@@ -18,6 +18,9 @@
TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
// vLLM custom ops
+ ops.def("weak_ref_tensor(Tensor input) -> Tensor");
+ ops.impl("weak_ref_tensor", torch::kCUDA, &weak_ref_tensor);
+
// Attention ops
// Compute the attention between an input query and the cached
// keys/values using PagedAttention.
diff --git a/vllm/utils.py b/vllm/utils.py
index fba9804289b94..1f75de89d0cc2 100644
--- a/vllm/utils.py
+++ b/vllm/utils.py
@@ -1479,3 +1479,12 @@ def __iter__(self):
def __len__(self):
return len(self._factory)
+
+
+def weak_ref_tensor(tensor: torch.Tensor) -> torch.Tensor:
+ """
+ Create a weak reference to a tensor.
+ The new tensor will share the same data as the original tensor,
+ but will not keep the original tensor alive.
+ """
+ return torch.ops._C.weak_ref_tensor(tensor)
diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py
index 8b74f06e77be0..4a287e3741d0f 100644
--- a/vllm/worker/model_runner.py
+++ b/vllm/worker/model_runner.py
@@ -50,7 +50,7 @@
from vllm.transformers_utils.config import uses_mrope
from vllm.utils import (DeviceMemoryProfiler, PyObjectCache, async_tensor_h2d,
flatten_2d_lists, is_hip, is_pin_memory_available,
- supports_dynamo)
+ supports_dynamo, weak_ref_tensor)
from vllm.worker.model_runner_base import (
ModelRunnerBase, ModelRunnerInputBase, ModelRunnerInputBuilderBase,
_add_attn_metadata_broadcastable_dict,
@@ -1426,12 +1426,6 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None:
dtype=self.model_config.dtype,
device=self.device)
- # Prepare buffer for outputs. These will be reused for all batch sizes.
- # It will be filled after the first graph capture.
- hidden_or_intermediate_states: List[Optional[torch.Tensor]] = [
- None
- ] * self.parallel_config.pipeline_parallel_size
-
graph_batch_size = self.max_batchsize_to_capture
batch_size_capture_list = [
bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size
@@ -1474,12 +1468,6 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None:
input_tokens[:batch_size],
"positions":
input_positions[..., :batch_size],
- "hidden_or_intermediate_states":
- hidden_or_intermediate_states[
- virtual_engine] # type: ignore
- [:batch_size]
- if hidden_or_intermediate_states[virtual_engine]
- is not None else None,
"intermediate_inputs":
intermediate_inputs[:batch_size]
if intermediate_inputs is not None else None,
@@ -1762,15 +1750,13 @@ def capture(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
- hidden_or_intermediate_states: Optional[Union[IntermediateTensors,
- torch.Tensor]],
intermediate_inputs: Optional[IntermediateTensors],
kv_caches: List[torch.Tensor],
attn_metadata: AttentionMetadata,
memory_pool: Optional[Tuple[int, int]],
stream: torch.cuda.Stream,
**kwargs,
- ) -> Union[torch.Tensor, IntermediateTensors]:
+ ):
assert self._graph is None
# Run the model a few times without capturing the graph.
# This is to make sure that the captured graph does not include the
@@ -1799,20 +1785,21 @@ def capture(
intermediate_tensors=intermediate_inputs,
**kwargs,
)
- if hidden_or_intermediate_states is not None:
- if get_pp_group().is_last_rank:
- hidden_or_intermediate_states.copy_(
- output_hidden_or_intermediate_states)
- else:
- for key in hidden_or_intermediate_states.tensors:
- hidden_or_intermediate_states[key].copy_(
- output_hidden_or_intermediate_states[key])
- else:
- hidden_or_intermediate_states = (
+
+ if isinstance(output_hidden_or_intermediate_states, torch.Tensor):
+ hidden_or_intermediate_states = weak_ref_tensor(
output_hidden_or_intermediate_states)
+ elif isinstance(output_hidden_or_intermediate_states,
+ IntermediateTensors):
+ hidden_or_intermediate_states = IntermediateTensors(
+ tensors={
+ key: weak_ref_tensor(value)
+ for key, value in
+ output_hidden_or_intermediate_states.tensors.items()
+ })
del output_hidden_or_intermediate_states
- # make sure `output_hidden_states` is deleted
+ # make sure `output_hidden_or_intermediate_states` is deleted
# in the graph's memory pool
gc.collect()
torch.cuda.synchronize()
@@ -1837,7 +1824,6 @@ def capture(
}
else:
self.output_buffers = hidden_or_intermediate_states
- return hidden_or_intermediate_states
def forward(
self,
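Note on the change above: the runner now keeps only non-owning aliases of the CUDA graph outputs, so the graph's memory pool (rather than the model runner) owns the output block. A minimal usage sketch, assuming a vLLM build that includes this patch (so `torch.ops._C.weak_ref_tensor` is registered) and an available CUDA device; `stash_output` is an illustrative helper, not part of the patch:

```python
# Sketch only: keep a non-owning alias of a CUDA-graph output so the
# graph's memory pool (not the model runner) owns the underlying block.
import gc

import torch

from vllm.utils import weak_ref_tensor  # wraps torch.ops._C.weak_ref_tensor


def stash_output(output: torch.Tensor) -> torch.Tensor:
    # The alias shares storage with `output` but does not keep it alive.
    buffer = weak_ref_tensor(output)
    del output      # drop the strong reference held by this frame
    gc.collect()    # mirrors capture(): let the pool reclaim the block
    return buffer


if torch.cuda.is_available():
    out = torch.randn(4, 8, device="cuda")
    print(stash_output(out).shape)  # torch.Size([4, 8])
```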
From 3cb07a36a20f9af11346650559470d685e9dc711 Mon Sep 17 00:00:00 2001
From: bnellnm <49004751+bnellnm@users.noreply.github.com>
Date: Sun, 27 Oct 2024 05:44:24 -0400
Subject: [PATCH 02/38] [Misc] Upgrade to pytorch 2.5 (#9588)
Signed-off-by: Bill Nell
Signed-off-by: youkaichao
Co-authored-by: youkaichao
---
CMakeLists.txt | 4 +-
cmake/utils.cmake | 6 +--
pyproject.toml | 2 +-
requirements-build.txt | 2 +-
requirements-cuda.txt | 6 +--
requirements-openvino.txt | 2 +-
.../decoder_only/language/test_big_models.py | 46 ++++++++++++++-----
vllm/platforms/cuda.py | 5 ++
8 files changed, 48 insertions(+), 25 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fc4ac10b7669a..1a6a311e97633 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,7 +49,7 @@ set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx11
# requirements.txt files and should be kept consistent. The ROCm torch
# versions are derived from Dockerfile.rocm
#
-set(TORCH_SUPPORTED_VERSION_CUDA "2.4.0")
+set(TORCH_SUPPORTED_VERSION_CUDA "2.5.0")
set(TORCH_SUPPORTED_VERSION_ROCM "2.5.0")
#
@@ -507,7 +507,7 @@ else()
FetchContent_Declare(
vllm-flash-attn
GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git
- GIT_TAG 013f0c4fc47e6574060879d9734c1df8c5c273bd
+ GIT_TAG 5259c586c403a4e4d8bf69973c159b40cc346fb9
GIT_PROGRESS TRUE
# Don't share the vllm-flash-attn build between build types
BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn
diff --git a/cmake/utils.cmake b/cmake/utils.cmake
index 24bb7299338ac..40430dae10c5b 100644
--- a/cmake/utils.cmake
+++ b/cmake/utils.cmake
@@ -424,11 +424,7 @@ function (define_gpu_extension_target GPU_MOD_NAME)
# Don't use `TORCH_LIBRARIES` for CUDA since it pulls in a bunch of
# dependencies that are not necessary and may not be installed.
if (GPU_LANGUAGE STREQUAL "CUDA")
- if ("${CUDA_CUDA_LIB}" STREQUAL "")
- set(CUDA_CUDA_LIB "${CUDA_CUDA_LIBRARY}")
- endif()
- target_link_libraries(${GPU_MOD_NAME} PRIVATE ${CUDA_CUDA_LIB}
- ${CUDA_LIBRARIES})
+ target_link_libraries(${GPU_MOD_NAME} PRIVATE CUDA::cudart CUDA::cuda_driver)
else()
target_link_libraries(${GPU_MOD_NAME} PRIVATE ${TORCH_LIBRARIES})
endif()
diff --git a/pyproject.toml b/pyproject.toml
index e0c56ab79cad0..e78f5652f486b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ requires = [
"packaging",
"setuptools>=61",
"setuptools-scm>=8.0",
- "torch == 2.4.0",
+ "torch == 2.5.0",
"wheel",
"jinja2",
]
diff --git a/requirements-build.txt b/requirements-build.txt
index 6144a56da8c47..ea2b688bb3108 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -4,6 +4,6 @@ ninja
packaging
setuptools>=61
setuptools-scm>=8
-torch==2.4.0
+torch==2.5.0
wheel
jinja2
diff --git a/requirements-cuda.txt b/requirements-cuda.txt
index 3b3c2f876919e..92fa303d687a2 100644
--- a/requirements-cuda.txt
+++ b/requirements-cuda.txt
@@ -4,7 +4,7 @@
# Dependencies for NVIDIA GPUs
ray >= 2.9
nvidia-ml-py # for pynvml package
-torch == 2.4.0
+torch == 2.5.0
# These must be updated alongside torch
-torchvision == 0.19 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
-xformers == 0.0.27.post2; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch 2.4.0
+torchvision == 0.20 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
+xformers == 0.0.28.post2; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch 2.5.0
diff --git a/requirements-openvino.txt b/requirements-openvino.txt
index ac54cf0c3288f..7ad0d1e7f704b 100644
--- a/requirements-openvino.txt
+++ b/requirements-openvino.txt
@@ -1,7 +1,7 @@
# Common dependencies
-r requirements-common.txt
-torch == 2.4.0 # should be aligned with "common" vLLM torch version
+torch == 2.5.0 # should be aligned with "common" vLLM torch version
openvino >= 2024.4.0 # since 2024.4.0 both CPU and GPU support Paged Attention
optimum @ git+https://github.com/huggingface/optimum.git@main # latest optimum is used to support latest transformers version
diff --git a/tests/models/decoder_only/language/test_big_models.py b/tests/models/decoder_only/language/test_big_models.py
index 75625b35209ce..fcfc159e4f5a0 100644
--- a/tests/models/decoder_only/language/test_big_models.py
+++ b/tests/models/decoder_only/language/test_big_models.py
@@ -8,7 +8,7 @@
from vllm.platforms import current_platform
-from ...utils import check_outputs_equal
+from ...utils import check_logprobs_close, check_outputs_equal
MODELS = [
"meta-llama/Llama-2-7b-hf",
@@ -43,18 +43,40 @@ def test_models(
dtype: str,
max_tokens: int,
) -> None:
- with hf_runner(model, dtype=dtype) as hf_model:
- hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
- with vllm_runner(model, dtype=dtype, enforce_eager=True) as vllm_model:
- vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
-
- check_outputs_equal(
- outputs_0_lst=hf_outputs,
- outputs_1_lst=vllm_outputs,
- name_0="hf",
- name_1="vllm",
- )
+ if model == "openbmb/MiniCPM3-4B":
+ # the output becomes slightly different when upgrading to
+ # pytorch 2.5 . Changing to logprobs checks instead of exact
+ # output checks.
+ NUM_LOG_PROBS = 8
+ with hf_runner(model, dtype=dtype) as hf_model:
+ hf_outputs = hf_model.generate_greedy_logprobs_limit(
+ example_prompts, max_tokens, NUM_LOG_PROBS)
+
+ with vllm_runner(model, dtype=dtype, enforce_eager=True) as vllm_model:
+ vllm_outputs = vllm_model.generate_greedy_logprobs(
+ example_prompts, max_tokens, NUM_LOG_PROBS)
+
+ check_logprobs_close(
+ outputs_0_lst=hf_outputs,
+ outputs_1_lst=vllm_outputs,
+ name_0="hf",
+ name_1="vllm",
+ )
+ else:
+ with hf_runner(model, dtype=dtype) as hf_model:
+ hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
+
+ with vllm_runner(model, dtype=dtype, enforce_eager=True) as vllm_model:
+ vllm_outputs = vllm_model.generate_greedy(example_prompts,
+ max_tokens)
+
+ check_outputs_equal(
+ outputs_0_lst=hf_outputs,
+ outputs_1_lst=vllm_outputs,
+ name_0="hf",
+ name_1="vllm",
+ )
@pytest.mark.parametrize("model", MODELS)
diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py
index 30bbf5107475d..9c5212ace1346 100644
--- a/vllm/platforms/cuda.py
+++ b/vllm/platforms/cuda.py
@@ -7,6 +7,7 @@
from typing import Callable, List, Tuple, TypeVar
import pynvml
+import torch
from typing_extensions import ParamSpec
from vllm.logger import init_logger
@@ -26,6 +27,10 @@
" and cause errors. See https://pypi.org/project/pynvml "
"for more information.")
+# pytorch 2.5 uses cudnn sdpa by default, which will cause crash on some models
+# see https://github.com/huggingface/diffusers/issues/9704 for details
+torch.backends.cuda.enable_cudnn_sdp(False)
+
# NVML utils
# Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`,
# all the related functions work on real physical device ids.
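The final hunk above disables the cuDNN SDPA backend globally because it crashes some models under PyTorch 2.5. A minimal sketch of that toggle, assuming PyTorch >= 2.5; the scoped `sdpa_kernel` variant is shown only as an illustrative alternative and is not what the patch does:

```python
# Sketch, assuming PyTorch >= 2.5: disable the cuDNN SDPA backend globally
# (as vllm/platforms/cuda.py now does at import time), with a scoped
# per-call restriction shown as an illustrative alternative.
import torch
import torch.nn.functional as F
from torch.nn.attention import SDPBackend, sdpa_kernel

torch.backends.cuda.enable_cudnn_sdp(False)  # global toggle, as in the patch

q = k = v = torch.randn(1, 4, 16, 64)
if torch.cuda.is_available():
    q, k, v = (t.cuda().half() for t in (q, k, v))
    # Scoped alternative: pin SDPA to the memory-efficient kernel for one call.
    with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION):
        out = F.scaled_dot_product_attention(q, k, v)
else:
    out = F.scaled_dot_product_attention(q, k, v)
print(out.shape)  # torch.Size([1, 4, 16, 64])
```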
From e130c40e4eba63ee8f04d493d83bca8c59b5ada5 Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Sun, 27 Oct 2024 17:30:03 +0000
Subject: [PATCH 03/38] Fix cache management in "Close inactive issues and PRs"
actions workflow (#9734)
---
.github/workflows/stale.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 2418c61bdcf63..81e7c9b050760 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -10,6 +10,7 @@ jobs:
permissions:
issues: write
pull-requests: write
+ actions: write
runs-on: ubuntu-latest
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
From 34a9941620d00879599a51609225452b705bae89 Mon Sep 17 00:00:00 2001
From: madt2709 <55849102+madt2709@users.noreply.github.com>
Date: Sun, 27 Oct 2024 10:46:41 -0700
Subject: [PATCH 04/38] [Bugfix] Fix load config when using bools (#9533)
---
tests/data/test_config.yaml | 2 ++
tests/test_utils.py | 6 +++++-
vllm/engine/arg_utils.py | 14 +-------------
vllm/utils.py | 35 +++++++++++++++++++++++++++--------
4 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/tests/data/test_config.yaml b/tests/data/test_config.yaml
index 42f4f6f7bb992..5090e8f357bb8 100644
--- a/tests/data/test_config.yaml
+++ b/tests/data/test_config.yaml
@@ -1,3 +1,5 @@
port: 12312
served_model_name: mymodel
tensor_parallel_size: 2
+trust_remote_code: true
+multi_step_stream_outputs: false
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 0fed8e678fc76..a731b11eae81c 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -6,7 +6,7 @@
import pytest
-from vllm.utils import (FlexibleArgumentParser, deprecate_kwargs,
+from vllm.utils import (FlexibleArgumentParser, StoreBoolean, deprecate_kwargs,
get_open_port, merge_async_iterators, supports_kw)
from .utils import error_on_warning
@@ -141,6 +141,8 @@ def parser_with_config():
parser.add_argument('--config', type=str)
parser.add_argument('--port', type=int)
parser.add_argument('--tensor-parallel-size', type=int)
+ parser.add_argument('--trust-remote-code', action='store_true')
+ parser.add_argument('--multi-step-stream-outputs', action=StoreBoolean)
return parser
@@ -214,6 +216,8 @@ def test_config_args(parser_with_config):
args = parser_with_config.parse_args(
['serve', 'mymodel', '--config', './data/test_config.yaml'])
assert args.tensor_parallel_size == 2
+ assert args.trust_remote_code
+ assert not args.multi_step_stream_outputs
def test_config_file(parser_with_config):
diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index c49f475b9ee61..38687809a31f6 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -19,7 +19,7 @@
from vllm.transformers_utils.config import (
maybe_register_config_serialize_by_value)
from vllm.transformers_utils.utils import check_gguf_file
-from vllm.utils import FlexibleArgumentParser
+from vllm.utils import FlexibleArgumentParser, StoreBoolean
if TYPE_CHECKING:
from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup
@@ -1144,18 +1144,6 @@ def add_cli_args(parser: FlexibleArgumentParser,
return parser
-class StoreBoolean(argparse.Action):
-
- def __call__(self, parser, namespace, values, option_string=None):
- if values.lower() == "true":
- setattr(namespace, self.dest, True)
- elif values.lower() == "false":
- setattr(namespace, self.dest, False)
- else:
- raise ValueError(f"Invalid boolean value: {values}. "
- "Expected 'true' or 'false'.")
-
-
# These functions are used by sphinx to build the documentation
def _engine_args_parser():
return EngineArgs.add_cli_args(FlexibleArgumentParser())
diff --git a/vllm/utils.py b/vllm/utils.py
index 1f75de89d0cc2..d4f2c936ca9cc 100644
--- a/vllm/utils.py
+++ b/vllm/utils.py
@@ -1155,6 +1155,18 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> None:
return wrapper
+class StoreBoolean(argparse.Action):
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ if values.lower() == "true":
+ setattr(namespace, self.dest, True)
+ elif values.lower() == "false":
+ setattr(namespace, self.dest, False)
+ else:
+ raise ValueError(f"Invalid boolean value: {values}. "
+ "Expected 'true' or 'false'.")
+
+
class FlexibleArgumentParser(argparse.ArgumentParser):
"""ArgumentParser that allows both underscore and dash in names."""
@@ -1163,7 +1175,7 @@ def parse_args(self, args=None, namespace=None):
args = sys.argv[1:]
if '--config' in args:
- args = FlexibleArgumentParser._pull_args_from_config(args)
+ args = self._pull_args_from_config(args)
# Convert underscores to dashes and vice versa in argument names
processed_args = []
@@ -1181,8 +1193,7 @@ def parse_args(self, args=None, namespace=None):
return super().parse_args(processed_args, namespace)
- @staticmethod
- def _pull_args_from_config(args: List[str]) -> List[str]:
+ def _pull_args_from_config(self, args: List[str]) -> List[str]:
"""Method to pull arguments specified in the config file
into the command-line args variable.
@@ -1226,7 +1237,7 @@ def _pull_args_from_config(args: List[str]) -> List[str]:
file_path = args[index + 1]
- config_args = FlexibleArgumentParser._load_config_file(file_path)
+ config_args = self._load_config_file(file_path)
# 0th index is for {serve,chat,complete}
# followed by model_tag (only for serve)
@@ -1247,8 +1258,7 @@ def _pull_args_from_config(args: List[str]) -> List[str]:
return args
- @staticmethod
- def _load_config_file(file_path: str) -> List[str]:
+ def _load_config_file(self, file_path: str) -> List[str]:
"""Loads a yaml file and returns the key value pairs as a
flattened list with argparse like pattern
```yaml
@@ -1282,9 +1292,18 @@ def _load_config_file(file_path: str) -> List[str]:
Make sure path is correct", file_path)
raise ex
+ store_boolean_arguments = [
+ action.dest for action in self._actions
+ if isinstance(action, StoreBoolean)
+ ]
+
for key, value in config.items():
- processed_args.append('--' + key)
- processed_args.append(str(value))
+ if isinstance(value, bool) and key not in store_boolean_arguments:
+ if value:
+ processed_args.append('--' + key)
+ else:
+ processed_args.append('--' + key)
+ processed_args.append(str(value))
return processed_args
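To make the boolean handling above concrete, here is a small standalone sketch: plain YAML booleans become bare `store_true` flags (emitted only when true), while arguments registered with `StoreBoolean` keep an explicit value. `StoreBoolean` mirrors the class moved into `vllm/utils.py`; `config_to_cli` is an illustrative stand-in for the config-loading logic, not the vLLM function itself:

```python
# Standalone sketch of the boolean handling added to
# FlexibleArgumentParser._load_config_file.
import argparse
from typing import List


class StoreBoolean(argparse.Action):

    def __call__(self, parser, namespace, values, option_string=None):
        if values.lower() == "true":
            setattr(namespace, self.dest, True)
        elif values.lower() == "false":
            setattr(namespace, self.dest, False)
        else:
            raise ValueError(f"Invalid boolean value: {values}. "
                             "Expected 'true' or 'false'.")


def config_to_cli(config: dict, parser: argparse.ArgumentParser) -> List[str]:
    store_boolean_dests = {
        action.dest
        for action in parser._actions if isinstance(action, StoreBoolean)
    }
    args: List[str] = []
    for key, value in config.items():
        if isinstance(value, bool) and \
                key.replace("-", "_") not in store_boolean_dests:
            if value:  # store_true flags: emit the bare flag only when true
                args.append("--" + key)
        else:
            args.extend(["--" + key, str(value)])
    return args


parser = argparse.ArgumentParser()
parser.add_argument("--trust-remote-code", action="store_true")
parser.add_argument("--multi-step-stream-outputs", action=StoreBoolean)

cli = config_to_cli(
    {"trust-remote-code": True, "multi-step-stream-outputs": False}, parser)
print(cli)  # ['--trust-remote-code', '--multi-step-stream-outputs', 'False']
ns = parser.parse_args(cli)
assert ns.trust_remote_code and not ns.multi_step_stream_outputs
```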
From 4e2d95e372ad5fbef7b27c66d527c37477c0c8bb Mon Sep 17 00:00:00 2001
From: wangshuai09 <391746016@qq.com>
Date: Mon, 28 Oct 2024 12:07:00 +0800
Subject: [PATCH 05/38] [Hardware][ROCM] using current_platform.is_rocm (#9642)
Signed-off-by: wangshuai09 <391746016@qq.com>
---
.../test_basic_correctness.py | 4 +-
tests/compile/utils.py | 4 +-
tests/kernels/quant_utils.py | 17 +++--
tests/kernels/test_attention.py | 23 +++---
tests/kernels/test_attention_selector.py | 3 +-
tests/kernels/test_blocksparse_attention.py | 7 +-
tests/kernels/test_encoder_decoder_attn.py | 76 ++++++++++---------
tests/kernels/test_moe.py | 7 +-
tests/lora/test_gemma.py | 5 +-
tests/lora/test_quant_model.py | 4 +-
.../vision_language/test_paligemma.py | 9 ++-
.../vision_language/test_phi3v.py | 3 +-
.../e2e/test_integration_dist_tp2.py | 4 +-
tests/utils.py | 4 +-
vllm/_custom_ops.py | 8 +-
.../ops/blocksparse_attention/interface.py | 6 +-
vllm/attention/selector.py | 4 +-
vllm/config.py | 49 ++++++------
vllm/executor/ray_utils.py | 4 +-
vllm/model_executor/custom_op.py | 4 +-
.../compressed_tensors_moe.py | 5 +-
.../schemes/compressed_tensors_w8a8_fp8.py | 6 +-
.../layers/quantization/fbgemm_fp8.py | 3 +-
.../model_executor/layers/quantization/fp8.py | 10 +--
.../layers/quantization/utils/w8a8_utils.py | 6 +-
vllm/model_executor/models/exaone.py | 4 +-
vllm/model_executor/models/granite.py | 4 +-
vllm/model_executor/models/llama.py | 4 +-
vllm/model_executor/models/registry.py | 4 +-
vllm/model_executor/models/solar.py | 4 +-
vllm/utils.py | 6 +-
vllm/worker/model_runner.py | 9 ++-
32 files changed, 162 insertions(+), 148 deletions(-)
diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py
index 3c2ca1bddd906..79647589d5204 100644
--- a/tests/basic_correctness/test_basic_correctness.py
+++ b/tests/basic_correctness/test_basic_correctness.py
@@ -11,7 +11,7 @@
import pytest
from vllm import LLM
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata
from ..models.utils import check_outputs_equal
@@ -51,7 +51,7 @@ def test_models(
enforce_eager: bool,
) -> None:
- if backend == "FLASHINFER" and is_hip():
+ if backend == "FLASHINFER" and current_platform.is_rocm():
pytest.skip("Flashinfer does not support ROCm/HIP.")
os.environ["VLLM_ATTENTION_BACKEND"] = backend
diff --git a/tests/compile/utils.py b/tests/compile/utils.py
index c69343b51ae02..64fc08e80de3b 100644
--- a/tests/compile/utils.py
+++ b/tests/compile/utils.py
@@ -5,7 +5,7 @@
from tests.quantization.utils import is_quant_method_supported
from vllm import LLM, SamplingParams
from vllm.compilation.levels import CompilationLevel
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
TEST_MODELS = [
("facebook/opt-125m", {}),
@@ -55,7 +55,7 @@
"quantization": "marlin"
}))
-if not is_hip() and is_quant_method_supported("awq"):
+if not current_platform.is_rocm() and is_quant_method_supported("awq"):
TEST_MODELS.append(("TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ", {
"quantization": "AWQ"
}))
diff --git a/tests/kernels/quant_utils.py b/tests/kernels/quant_utils.py
index 8f6a54ff5979c..f2358940fc7b8 100644
--- a/tests/kernels/quant_utils.py
+++ b/tests/kernels/quant_utils.py
@@ -2,12 +2,13 @@
import torch
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
# Using the default value (240.0) from pytorch will cause accuracy
# issue on dynamic quantization models. Here use 224.0 for rocm.
ROCM_FP8_MAX = 224.0
-FP8_DTYPE = torch.float8_e4m3fnuz if is_hip() else torch.float8_e4m3fn
+FP8_DTYPE = torch.float8_e4m3fnuz if current_platform.is_rocm() \
+ else torch.float8_e4m3fn
def as_float32_tensor(x: Union[float, torch.tensor]) -> torch.tensor:
@@ -24,8 +25,10 @@ def ref_dynamic_per_token_quant(x: torch.tensor,
qtype_traits = torch.iinfo(quant_dtype) if quant_dtype == torch.int8 \
else torch.finfo(quant_dtype)
- qtype_traits_max = ROCM_FP8_MAX if is_hip() else qtype_traits.max
- qtype_traits_min = -ROCM_FP8_MAX if is_hip() else qtype_traits.min
+ qtype_traits_max = ROCM_FP8_MAX if current_platform.is_rocm() \
+ else qtype_traits.max
+ qtype_traits_min = -ROCM_FP8_MAX if current_platform.is_rocm() \
+ else qtype_traits.min
qtype_max = as_float32_tensor(qtype_traits_max)
s_1 = as_float32_tensor(1.0)
s_512 = as_float32_tensor(512.0)
@@ -66,8 +69,10 @@ def ref_dynamic_per_tensor_fp8_quant(x: torch.tensor) \
-> Tuple[torch.tensor, torch.tensor]:
fp8_traits = torch.finfo(FP8_DTYPE)
- fp8_traits_max = ROCM_FP8_MAX if is_hip() else fp8_traits.max
- fp8_traits_min = -ROCM_FP8_MAX if is_hip() else fp8_traits.min
+ fp8_traits_max = ROCM_FP8_MAX if current_platform.is_rocm() \
+ else fp8_traits.max
+ fp8_traits_min = -ROCM_FP8_MAX if current_platform.is_rocm() \
+ else fp8_traits.min
fp8_max = as_float32_tensor(fp8_traits_max)
one = as_float32_tensor(1.0)
diff --git a/tests/kernels/test_attention.py b/tests/kernels/test_attention.py
index 52f1ecd176963..1604aa4d2d6e5 100644
--- a/tests/kernels/test_attention.py
+++ b/tests/kernels/test_attention.py
@@ -6,11 +6,12 @@
from tests.kernels.utils import opcheck
from vllm import _custom_ops as ops
-from vllm.utils import get_max_shared_memory_bytes, is_hip, seed_everything
+from vllm.platforms import current_platform
+from vllm.utils import get_max_shared_memory_bytes, seed_everything
from .allclose_default import get_default_atol, get_default_rtol
-if not is_hip():
+if not current_platform.is_rocm():
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask
@@ -23,8 +24,9 @@
NUM_BLOCKS = 4321 # Arbitrary values for testing
PARTITION_SIZE = 512
# flshattF and tritonflashattF supported: {torch.float16, torch.bfloat16}
-DTYPES = [torch.half, torch.bfloat16, torch.float
- ] if not is_hip() else [torch.half, torch.bfloat16]
+DTYPES = [
+ torch.half, torch.bfloat16, torch.float
+] if not current_platform.is_rocm() else [torch.half, torch.bfloat16]
NUM_GEN_SEQS = [7] # Arbitrary values for testing
NUM_PREFILL_SEQS = [3] # Arbitrary values for testing
NUM_HEADS = [(40, 40), (64, 8)] # Arbitrary values for testing
@@ -114,7 +116,8 @@ def ref_single_query_cached_kv_attention(
@pytest.mark.parametrize(
- "version", ["v1", "v2"] if not is_hip() else ["v1", "v2", "rocm"])
+ "version",
+ ["v1", "v2"] if not current_platform.is_rocm() else ["v1", "v2", "rocm"])
@pytest.mark.parametrize("num_seqs", NUM_GEN_SEQS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@@ -317,8 +320,8 @@ def test_paged_attention(
# NOTE(woosuk): Due to the kernel-level differences in the two
# implementations, there is a small numerical difference in the two
# outputs. Thus, we use a relaxed tolerance for the test.
- atol = get_default_atol(output) if is_hip() else 1e-3
- rtol = get_default_rtol(output) if is_hip() else 1e-5
+ atol = get_default_atol(output) if current_platform.is_rocm() else 1e-3
+ rtol = get_default_rtol(output) if current_platform.is_rocm() else 1e-5
# NOTE(zhaoyang): FP8 KV Cache will introduce quantization error,
# so we use a relaxed tolerance for the test.
@@ -368,7 +371,7 @@ def ref_multi_query_kv_attention(
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
-@pytest.mark.skipif(is_hip(),
+@pytest.mark.skipif(current_platform.is_rocm(),
reason="Xformers backend is not supported on ROCm.")
@torch.inference_mode()
def test_multi_query_kv_attention(
@@ -425,6 +428,6 @@ def test_multi_query_kv_attention(
scale,
dtype,
)
- atol = get_default_atol(output) if is_hip() else 1e-3
- rtol = get_default_rtol(output) if is_hip() else 1e-5
+ atol = get_default_atol(output) if current_platform.is_rocm() else 1e-3
+ rtol = get_default_rtol(output) if current_platform.is_rocm() else 1e-5
torch.testing.assert_close(output, ref_output, atol=atol, rtol=rtol)
diff --git a/tests/kernels/test_attention_selector.py b/tests/kernels/test_attention_selector.py
index df3e770e260e0..3fe9ca0b0450f 100644
--- a/tests/kernels/test_attention_selector.py
+++ b/tests/kernels/test_attention_selector.py
@@ -25,7 +25,8 @@ def test_env(name: str, device: str, monkeypatch):
False)
assert backend.name == "TORCH_SDPA"
elif device == "hip":
- with patch("vllm.attention.selector.is_hip", return_value=True):
+ with patch("vllm.attention.selector.current_platform.is_rocm",
+ return_value=True):
backend = which_attn_to_use(16, torch.float16, torch.float16, 16,
False)
assert backend.name == "ROCM_FLASH"
diff --git a/tests/kernels/test_blocksparse_attention.py b/tests/kernels/test_blocksparse_attention.py
index f3bd8f0524264..b65efb3abc230 100644
--- a/tests/kernels/test_blocksparse_attention.py
+++ b/tests/kernels/test_blocksparse_attention.py
@@ -7,7 +7,8 @@
from vllm import _custom_ops as ops
from vllm.attention.ops.blocksparse_attention.interface import (
LocalStridedBlockSparseAttn)
-from vllm.utils import get_max_shared_memory_bytes, is_hip, seed_everything
+from vllm.platforms import current_platform
+from vllm.utils import get_max_shared_memory_bytes, seed_everything
from .allclose_default import get_default_atol, get_default_rtol
@@ -316,8 +317,8 @@ def test_paged_attention(
# NOTE(woosuk): Due to the kernel-level differences in the two
# implementations, there is a small numerical difference in the two
# outputs. Thus, we use a relaxed tolerance for the test.
- atol = get_default_atol(output) if is_hip() else 1e-3
- rtol = get_default_rtol(output) if is_hip() else 1e-5
+ atol = get_default_atol(output) if current_platform.is_rocm() else 1e-3
+ rtol = get_default_rtol(output) if current_platform.is_rocm() else 1e-5
# NOTE(zhaoyang): FP8 KV Cache will introduce quantization error,
# so we use a relaxed tolerance for the test.
diff --git a/tests/kernels/test_encoder_decoder_attn.py b/tests/kernels/test_encoder_decoder_attn.py
index 6b979d0558c46..bc99c5559d388 100644
--- a/tests/kernels/test_encoder_decoder_attn.py
+++ b/tests/kernels/test_encoder_decoder_attn.py
@@ -18,7 +18,7 @@
from vllm.attention.backends.utils import STR_NOT_IMPL_ENC_DEC_ROCM_HIP
from vllm.attention.selector import (_Backend,
global_force_attn_backend_context_manager)
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
# List of support backends for encoder/decoder models
LIST_ENC_DEC_SUPPORTED_BACKENDS = [_Backend.XFORMERS]
@@ -82,7 +82,7 @@ class TestResources(NamedTuple):
will leverage attn_backend for the purpose of
constructing backend-compatible attention
metadata instances
-
+
Attributes:
* scale: 1/sqrt(d) scale factor for attn
@@ -105,10 +105,10 @@ def _make_test_resources(test_pt: TestPoint, ) -> TestResources:
Build key components for performing encoder/decoder attention test.
Note that
- (1) The Attention instance constructed here, automatically selects
+ (1) The Attention instance constructed here, automatically selects
an attention backend class based on platform info & a set of canned
heuristics, so
- (2) The attention backend instance constructed here is thus *not
+ (2) The attention backend instance constructed here is thus *not
the same backend instance* used by attn, but rather it is
intended to be a *different instance* of the *same backend class*;
therefore,
@@ -156,7 +156,7 @@ def _encoder_attn_setup(
'''
Set up test vectors & data structures for encoder attention test.
- A triplet of synthetic query/key/value tensors are constructed.
+ A triplet of synthetic query/key/value tensors are constructed.
Given this is an encoder attention test, the key & value
sequences will have the same length as the corresponding queries.
@@ -169,14 +169,14 @@ def _encoder_attn_setup(
Arguments:
* test_pt: TestPoint data structure; this function relies on the
- following fields: batch_size, num_heads, head_size,
+ following fields: batch_size, num_heads, head_size,
block_size, max_q_seq_len
* test_rsrcs: TestResources data structure; this function relies on the
scale field
-
+
Returns:
-
+
* PhaseTestParameters data structure comprising (1) packed query/key/value
tensors, (2) the ideal output of attention computed using a naive
implementation, and (3) KVCache field set to None
@@ -265,7 +265,7 @@ def _decoder_attn_setup(
Arguments:
* test_pt: TestPoint data structure; this function relies on the
- following fields: batch_size, num_heads, head_size,
+ following fields: batch_size, num_heads, head_size,
block_size, max_q_seq_len
* test_rsrcs: TestResources data structure; this function relies on the
scale field
@@ -275,14 +275,14 @@ def _decoder_attn_setup(
* qkv: Unpacked (batch_size x padded_seq_len x num_heads x
head_size) query/key/value tensors
* Prefill-phase decoder self-attention PhaseTestParameters data structure,
- including (1) packed (number_of_tokens x num_heads x head_size)
+ including (1) packed (number_of_tokens x num_heads x head_size)
query/key/value tensors along with (2) ideal attention output
- computed using a naive implementation, and (3) memory-mapping data
+ computed using a naive implementation, and (3) memory-mapping data
structures appropriate for prefill phase.
- * Decode-phase decoder self-attention PhaseTestParameters data structure,
- including (1) packed (number_of_tokens x num_heads x head_size)
- query/key/value tensors along with (2) ideal attention output
- computed using a naive implementation, and (3) memory-mapping data
+ * Decode-phase decoder self-attention PhaseTestParameters data structure,
+ including (1) packed (number_of_tokens x num_heads x head_size)
+ query/key/value tensors along with (2) ideal attention output
+ computed using a naive implementation, and (3) memory-mapping data
structures appropriate for decode phase.
* max_block_idx: max physical address in decoder self-attention block-table
(intended to be used as the base address for the encoder/
@@ -436,12 +436,12 @@ def _enc_dec_cross_attn_setup_reuses_query(
This function also constructs the cross-attention KV cache memory mapping
(slot mapping and block table), ensuring that the block table starts at
- block_base_addr.
+ block_base_addr.
Arguments:
* decoder_qkv: pre-existing unpacked (batch_size x padded_seq_len x
- num_heads x head_size) decoder self-attention inputs;
+ num_heads x head_size) decoder self-attention inputs;
this function relies on the query and q_seq_lens
fields
* encoder_test_params: PhaseTestParameters data structure which was
@@ -452,7 +452,7 @@ def _enc_dec_cross_attn_setup_reuses_query(
self-attention; all fields
including KV cache required
* test_pt: TestPoint data structure; this function relies on the
- following fields: batch_size, num_heads, head_size,
+ following fields: batch_size, num_heads, head_size,
block_size, max_q_seq_len
* test_rsrcs: TestResources data structure; this function relies on the
scale field
@@ -460,16 +460,16 @@ def _enc_dec_cross_attn_setup_reuses_query(
Returns:
- * Prefill-phase encoder/decoder cross-attention PhaseTestParameters data
- structure, including (1) packed
+ * Prefill-phase encoder/decoder cross-attention PhaseTestParameters data
+ structure, including (1) packed
(number_of_tokens x num_heads x head_size) query/key/value tensors
- along with (2) ideal attention output computed using a
+ along with (2) ideal attention output computed using a
naive implementation, and (3) memory-mapping data structures appropriate
for prefill phase.
- * Decode-phase encoder/decoder cross-attention PhaseTestParameters data
+ * Decode-phase encoder/decoder cross-attention PhaseTestParameters data
structure, including (1) packed
(number_of_tokens x num_heads x head_size) query/key/value tensors
- along with (2) ideal attention output computed using a
+ along with (2) ideal attention output computed using a
naive implementation, and (3) memory-mapping data structures appropriate
for decode phase.
'''
@@ -596,7 +596,7 @@ def _run_encoder_attention_test(
'''
Run encoder attention.
- attn.forward() is passed attn_type=AttentionType.ENCODER in order
+ attn.forward() is passed attn_type=AttentionType.ENCODER in order
to configure the kernel invocation for encoder attention
Requires attn_metadata.num_decode_tokens == 0
@@ -607,7 +607,7 @@ def _run_encoder_attention_test(
* attn: Attention wrapper instance
* encoder_test_params: encoder PhaseTestParameters data structure;
this function relies on the packed
- (number_of_tokens x num_heads x head_size)
+ (number_of_tokens x num_heads x head_size)
query/key/value fields
* attn_metadata: attention metadata for encoder/decoder-self attention
@@ -646,7 +646,7 @@ def _run_decoder_self_attention_test(
and attn (Attention wrapper instance) fields
* decoder_test_params: decoder PhaseTestParameters data structure;
this function relies on the packed
- (number_of_tokens x num_heads x head_size)
+ (number_of_tokens x num_heads x head_size)
query/key/value fields
* attn_metadata: attention metadata for decoder-self attention
(contains KV cache memory-mapping)
@@ -694,11 +694,11 @@ def _run_encoder_decoder_cross_attention_test(
and attn (Attention wrapper instance) fields
* decoder_test_params: decoder PhaseTestParameters data structure;
this function relies on the packed
- (number_of_tokens x num_heads x head_size)
+ (number_of_tokens x num_heads x head_size)
query field
* cross_test_params: encoder/decoder PhaseTestParameters data structure;
this function relies on the packed
- (number_of_tokens x num_heads x head_size)
+ (number_of_tokens x num_heads x head_size)
key/value fields
* attn_metadata: attention metadata for encoder/decoder-self attention
@@ -726,7 +726,8 @@ def _run_encoder_decoder_cross_attention_test(
attn_type=attn_type)
-@pytest.mark.skipif(is_hip(), reason=STR_NOT_IMPL_ENC_DEC_ROCM_HIP)
+@pytest.mark.skipif(current_platform.is_rocm(),
+ reason=STR_NOT_IMPL_ENC_DEC_ROCM_HIP)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("attn_backend", LIST_ENC_DEC_SUPPORTED_BACKENDS)
@@ -755,7 +756,8 @@ def test_encoder_only(
No KV cache is required for encoder-only attention.
Note on ROCm/HIP: currently encoder/decoder models are not supported on
- AMD GPUs, therefore this test simply is skipped if is_hip().
+ AMD GPUs, therefore this test simply is skipped if
+ current_platform.is_rocm().
This test globally forces an override of the usual backend
auto-selection process, forcing the specific backend-under-test
@@ -811,7 +813,8 @@ def test_encoder_only(
assert_actual_matches_ideal(enc_test_params, enc_pckd_act_out)
-@pytest.mark.skipif(is_hip(), reason=STR_NOT_IMPL_ENC_DEC_ROCM_HIP)
+@pytest.mark.skipif(current_platform.is_rocm(),
+ reason=STR_NOT_IMPL_ENC_DEC_ROCM_HIP)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("attn_backend", LIST_ENC_DEC_SUPPORTED_BACKENDS)
@@ -837,14 +840,14 @@ def test_e2e_enc_dec_attn(
attributes for prefill-phase, and (2) an analogous attention metadata
structure but for decode-phase
* Test attention steps in the following order
-
+
* Encoder attention
* Prefill self-attention
* Prefill cross-attention
* Decode self-attention
* Decode cross-attention
- * Besides being reflective of realistic use-cases, this order would
- exacerbate any accidental overlap in the self-/cross-attention
+ * Besides being reflective of realistic use-cases, this order would
+ exacerbate any accidental overlap in the self-/cross-attention
block tables, which one hopes to avoid
@@ -864,10 +867,11 @@ def test_e2e_enc_dec_attn(
to be utilized.
Note on ROCm/HIP: currently encoder/decoder models are not supported on
- AMD GPUs, therefore this test simply is skipped if is_hip().
+ AMD GPUs, therefore this test simply is skipped if
+ current_platform.is_rocm().
Note on metadata: there is a single attention metadata structure shared by
- all prefill-phase attention operations (encoder, decoder, enc/dec cross),
+ all prefill-phase attention operations (encoder, decoder, enc/dec cross),
and a single one shared by all decode-phase attention operations
(decoder & enc/dec cross.) This is intended to reflect the behavior
of EncoderDecoderModelRunner, which constructs a single attention metadata
diff --git a/tests/kernels/test_moe.py b/tests/kernels/test_moe.py
index c0053071258ea..4bfc089c82179 100644
--- a/tests/kernels/test_moe.py
+++ b/tests/kernels/test_moe.py
@@ -18,8 +18,9 @@
from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
marlin_quantize)
from vllm.model_executor.models.mixtral import MixtralMoE
+from vllm.platforms import current_platform
from vllm.scalar_type import scalar_types
-from vllm.utils import is_hip, seed_everything
+from vllm.utils import seed_everything
@pytest.mark.parametrize("m", [1024 * 128, 512, 222, 33, 1])
@@ -103,7 +104,7 @@ def test_mixtral_moe(dtype: torch.dtype):
@pytest.mark.parametrize("act_order", [True, False])
@pytest.mark.parametrize("num_bits", [4, 8])
@pytest.mark.parametrize("is_k_full", [True, False])
-@pytest.mark.skipif(is_hip(), reason="Skip for rocm")
+@pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm")
def test_fused_marlin_moe(
m: int,
n: int,
@@ -256,7 +257,7 @@ def test_fused_marlin_moe(
@pytest.mark.parametrize("act_order", [True, False])
@pytest.mark.parametrize("num_bits", [4, 8])
@pytest.mark.parametrize("is_k_full", [True, False])
-@pytest.mark.skipif(is_hip(), reason="Skip for rocm")
+@pytest.mark.skipif(current_platform.is_rocm(), reason="Skip for rocm")
def test_single_marlin_moe_multiply(
m: int,
n: int,
diff --git a/tests/lora/test_gemma.py b/tests/lora/test_gemma.py
index f7c1d4f041c12..15ec66b0f5502 100644
--- a/tests/lora/test_gemma.py
+++ b/tests/lora/test_gemma.py
@@ -4,7 +4,7 @@
import vllm
from vllm.lora.request import LoRARequest
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
MODEL_PATH = "google/gemma-7b"
@@ -31,7 +31,8 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]:
return generated_texts
-@pytest.mark.xfail(is_hip(), reason="There can be output mismatch on ROCm")
+@pytest.mark.xfail(current_platform.is_rocm(),
+ reason="There can be output mismatch on ROCm")
def test_gemma_lora(gemma_lora_files):
llm = vllm.LLM(MODEL_PATH,
max_model_len=1024,
diff --git a/tests/lora/test_quant_model.py b/tests/lora/test_quant_model.py
index d004c65929418..5432fa4ad0d3a 100644
--- a/tests/lora/test_quant_model.py
+++ b/tests/lora/test_quant_model.py
@@ -8,7 +8,7 @@
import vllm
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.lora.request import LoRARequest
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
@dataclass
@@ -19,7 +19,7 @@ class ModelWithQuantization:
MODELS: List[ModelWithQuantization]
#AWQ quantization is currently not supported in ROCm.
-if is_hip():
+if current_platform.is_rocm():
MODELS = [
ModelWithQuantization(
model_path="TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ",
diff --git a/tests/models/decoder_only/vision_language/test_paligemma.py b/tests/models/decoder_only/vision_language/test_paligemma.py
index a3ca0845e5ff8..69189ba2f25cb 100644
--- a/tests/models/decoder_only/vision_language/test_paligemma.py
+++ b/tests/models/decoder_only/vision_language/test_paligemma.py
@@ -6,8 +6,9 @@
BatchEncoding)
from vllm.multimodal.utils import rescale_image_size
+from vllm.platforms import current_platform
from vllm.sequence import SampleLogprobs
-from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, is_hip
+from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE
from ....conftest import IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets
from ...utils import check_logprobs_close
@@ -24,7 +25,7 @@
# ROCm Triton FA can run into compilation issues with these models due to,
# excessive use of shared memory. Use other backends in the meantime.
# FIXME (mattwong, gshtrasb, hongxiayan)
-if is_hip():
+if current_platform.is_rocm():
os.environ["VLLM_USE_TRITON_FLASH_ATTN"] = "0"
@@ -70,7 +71,7 @@ def run_test(
All the image fixtures for the test are from IMAGE_ASSETS.
For huggingface runner, we provide the PIL images as input.
- For vllm runner, we provide MultiModalDataDict objects
+ For vllm runner, we provide MultiModalDataDict objects
and corresponding MultiModalConfig as input.
Note, the text input is also adjusted to abide by vllm contract.
The text output is sanitized to be able to compare with hf.
@@ -151,7 +152,7 @@ def process(hf_inputs: BatchEncoding):
pytest.param(
"float",
marks=pytest.mark.skipif(
- is_hip(),
+ current_platform.is_rocm(),
reason=
"ROCm FA does not yet fully support 32-bit precision on PaliGemma")
), "half"
diff --git a/tests/models/decoder_only/vision_language/test_phi3v.py b/tests/models/decoder_only/vision_language/test_phi3v.py
index dfe10629f1c66..1840b4bb8574c 100644
--- a/tests/models/decoder_only/vision_language/test_phi3v.py
+++ b/tests/models/decoder_only/vision_language/test_phi3v.py
@@ -12,7 +12,6 @@
from vllm.multimodal.utils import rescale_image_size
from vllm.platforms import current_platform
from vllm.sequence import SampleLogprobs
-from vllm.utils import is_hip
from ....conftest import (IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner,
_ImageAssets)
@@ -56,7 +55,7 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
# ROCm Triton FA can run into shared memory issues with these models,
# use other backends in the meantime
# FIXME (mattwong, gshtrasb, hongxiayan)
-if is_hip():
+if current_platform.is_rocm():
os.environ["VLLM_USE_TRITON_FLASH_ATTN"] = "0"
diff --git a/tests/spec_decode/e2e/test_integration_dist_tp2.py b/tests/spec_decode/e2e/test_integration_dist_tp2.py
index b829d1a5be784..25562ca85adf4 100644
--- a/tests/spec_decode/e2e/test_integration_dist_tp2.py
+++ b/tests/spec_decode/e2e/test_integration_dist_tp2.py
@@ -5,7 +5,7 @@
import pytest
import torch
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
from .conftest import run_equality_correctness_test_tp
@@ -51,7 +51,7 @@ def test_target_model_tp_gt_1(common_llm_kwargs, per_test_common_llm_kwargs,
batch_size: int, output_len: int, seed: int):
"""Verify greedy equality when tensor parallelism is used.
"""
- if is_hip():
+ if current_platform.is_rocm():
pytest.skip("hip is not well-supported yet")
run_equality_correctness_test_tp("JackFram/llama-68m",
common_llm_kwargs,
diff --git a/tests/utils.py b/tests/utils.py
index e983104e3cb0c..0c61891cfefec 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -26,7 +26,7 @@
from vllm.platforms import current_platform
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import (FlexibleArgumentParser, GB_bytes,
- cuda_device_count_stateless, get_open_port, is_hip)
+ cuda_device_count_stateless, get_open_port)
if current_platform.is_rocm():
from amdsmi import (amdsmi_get_gpu_vram_usage,
@@ -487,7 +487,7 @@ def wait_for_gpu_memory_to_clear(devices: List[int],
output: Dict[int, str] = {}
output_raw: Dict[int, float] = {}
for device in devices:
- if is_hip():
+ if current_platform.is_rocm():
dev_handle = amdsmi_get_processor_handles()[device]
mem_info = amdsmi_get_gpu_vram_usage(dev_handle)
gb_used = mem_info["vram_used"] / 2**10
diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py
index f57414bd5197e..46a2fb8bc80a2 100644
--- a/vllm/_custom_ops.py
+++ b/vllm/_custom_ops.py
@@ -659,11 +659,11 @@ def scaled_fp8_quant(
Args:
input: The input tensor to be quantized to FP8
scale: Optional scaling factor for the FP8 quantization
- scale_ub: Optional upper bound for scaling factor in dynamic
+ scale_ub: Optional upper bound for scaling factor in dynamic
per token case
num_token_padding: If specified, pad the first dimension
of the output to at least this value.
- use_per_token_if_dynamic: Whether to do per_tensor or per_token
+ use_per_token_if_dynamic: Whether to do per_tensor or per_token
in the dynamic quantization case.
Returns:
@@ -674,8 +674,8 @@ def scaled_fp8_quant(
assert (input.ndim == 2)
shape: Union[Tuple[int, int], torch.Size] = input.shape
# For rocm, the output fp8 dtype is torch.float_e3m3fnuz
- out_dtype: torch.dtype = torch.float8_e4m3fnuz if vllm.utils.is_hip() \
- else torch.float8_e4m3fn
+ out_dtype: torch.dtype = torch.float8_e4m3fnuz \
+ if current_platform.is_rocm() else torch.float8_e4m3fn
if num_token_padding:
shape = (max(num_token_padding, input.shape[0]), shape[1])
output = torch.empty(shape, device=input.device, dtype=out_dtype)
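The hunk above is representative of the whole patch: platform checks go through `vllm.platforms.current_platform` instead of the legacy `vllm.utils.is_hip()` helper. A minimal sketch of the pattern, assuming an installed vLLM; the dtype selection mirrors the hunk above:

```python
# Sketch of the current_platform.is_rocm() pattern that replaces is_hip().
import torch

from vllm.platforms import current_platform


def fp8_dtype() -> torch.dtype:
    # ROCm uses the e4m3fnuz FP8 variant; CUDA builds use e4m3fn.
    return (torch.float8_e4m3fnuz
            if current_platform.is_rocm() else torch.float8_e4m3fn)


print(fp8_dtype())
```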
diff --git a/vllm/attention/ops/blocksparse_attention/interface.py b/vllm/attention/ops/blocksparse_attention/interface.py
index e4dc576d27932..a98eb431ac7fc 100644
--- a/vllm/attention/ops/blocksparse_attention/interface.py
+++ b/vllm/attention/ops/blocksparse_attention/interface.py
@@ -3,7 +3,6 @@
import torch
from vllm.platforms import current_platform
-from vllm.utils import is_hip
from .utils import (dense_to_crow_col, get_head_sliding_step,
get_sparse_attn_mask)
@@ -32,8 +31,9 @@ def __init__(
):
super().__init__()
if use_spda is None:
- use_spda = is_hip() or current_platform.is_cpu() or not \
- IS_COMPUTE_8_OR_ABOVE
+ use_spda = current_platform.is_rocm() or \
+ current_platform.is_cpu() or not \
+ IS_COMPUTE_8_OR_ABOVE
device = device or (torch.cuda.current_device()
if current_platform.is_cuda_alike() else "cpu")
device = torch.device(device)
diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py
index 10d4509b38279..376b3136f0fb8 100644
--- a/vllm/attention/selector.py
+++ b/vllm/attention/selector.py
@@ -10,7 +10,7 @@
from vllm.attention.backends.abstract import AttentionBackend
from vllm.logger import init_logger
from vllm.platforms import current_platform
-from vllm.utils import STR_BACKEND_ENV_VAR, is_hip
+from vllm.utils import STR_BACKEND_ENV_VAR
logger = init_logger(__name__)
@@ -208,7 +208,7 @@ def which_attn_to_use(
logger.info("Cannot use %s backend on TPU.", selected_backend)
return _Backend.PALLAS
- if is_hip():
+ if current_platform.is_rocm():
# AMD GPUs.
selected_backend = (_Backend.ROCM_FLASH if selected_backend
== _Backend.FLASH_ATTN else selected_backend)
diff --git a/vllm/config.py b/vllm/config.py
index a1fba98233b80..99a82c8f1b40b 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -17,7 +17,7 @@
get_hf_image_processor_config,
get_hf_text_config)
from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory,
- is_hip, print_warning_once)
+ print_warning_once)
if TYPE_CHECKING:
from ray.util.placement_group import PlacementGroup
@@ -43,7 +43,7 @@ class ModelConfig:
Args:
model: Name or path of the huggingface model to use.
- It is also used as the content for `model_name` tag in metrics
+ It is also used as the content for `model_name` tag in metrics
output when `served_model_name` is not specified.
task: The task to use the model for. Each vLLM instance only supports
one task, even if the same model can be used for multiple tasks.
@@ -99,15 +99,15 @@ class ModelConfig:
skip_tokenizer_init: If true, skip initialization of tokenizer and
detokenizer.
served_model_name: The model name used in metrics tag `model_name`,
- matches the model name exposed via the APIs. If multiple model
- names provided, the first name will be used. If not specified,
+ matches the model name exposed via the APIs. If multiple model
+ names provided, the first name will be used. If not specified,
the model name will be the same as `model`.
- limit_mm_per_prompt: Maximum number of data instances per modality
+ limit_mm_per_prompt: Maximum number of data instances per modality
per prompt. Only applicable for multimodal models.
- override_neuron_config: Initialize non default neuron config or
- override default neuron config that are specific to Neuron devices,
- this argument will be used to configure the neuron config that
- can not be gathered from the vllm arguments.
+ override_neuron_config: Initialize non default neuron config or
+ override default neuron config that are specific to Neuron devices,
+ this argument will be used to configure the neuron config that
+ can not be gathered from the vllm arguments.
config_format: The config format which shall be loaded.
Defaults to 'auto' which defaults to 'hf'.
mm_processor_kwargs: Arguments to be forwarded to the model's processor
@@ -350,7 +350,7 @@ def _verify_quantization(self) -> None:
raise ValueError(
f"Unknown quantization method: {self.quantization}. Must "
f"be one of {supported_quantization}.")
- if is_hip(
+ if current_platform.is_rocm(
) and self.quantization not in rocm_supported_quantization:
raise ValueError(
f"{self.quantization} quantization is currently not "
@@ -365,7 +365,7 @@ def _verify_quantization(self) -> None:
"%s quantization is not fully "
"optimized yet. The speed can be slower than "
"non-quantized models.", self.quantization)
- if (self.quantization == "awq" and is_hip()
+ if (self.quantization == "awq" and current_platform.is_rocm()
and not envs.VLLM_USE_TRITON_AWQ):
logger.warning(
"Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
@@ -385,7 +385,7 @@ def _verify_cuda_graph(self) -> None:
def _verify_bnb_config(self) -> None:
"""
- The current version of bitsandbytes (0.44.0) with 8-bit models does not
+ The current version of bitsandbytes (0.44.0) with 8-bit models does not
yet support CUDA graph.
"""
is_bitsandbytes = self.quantization == "bitsandbytes"
@@ -810,7 +810,7 @@ class LoadConfig:
fast weight loading.
"bitsandbytes" will load nf4 type weights.
ignore_patterns: The list of patterns to ignore when loading the model.
- Default to "original/**/*" to avoid repeated loading of llama's
+ Default to "original/**/*" to avoid repeated loading of llama's
checkpoints.
"""
@@ -843,7 +843,8 @@ def _verify_load_format(self) -> None:
self.load_format = LoadFormat(load_format)
rocm_not_supported_load_format: List[str] = []
- if is_hip() and load_format in rocm_not_supported_load_format:
+ if current_platform.is_rocm(
+ ) and load_format in rocm_not_supported_load_format:
rocm_supported_load_format = [
f for f in LoadFormat.__members__
if (f not in rocm_not_supported_load_format)
@@ -967,7 +968,7 @@ def _verify_args(self) -> None:
if self.use_ray:
from vllm.executor import ray_utils
ray_utils.assert_ray_available()
- if is_hip():
+ if current_platform.is_rocm():
self.disable_custom_all_reduce = True
logger.info(
"Disabled the custom all-reduce kernel because it is not "
@@ -996,7 +997,7 @@ class SchedulerConfig:
prompt latency) before scheduling next prompt.
enable_chunked_prefill: If True, prefill requests can be chunked based
on the remaining max_num_batched_tokens.
- preemption_mode: Whether to perform preemption by swapping or
+ preemption_mode: Whether to perform preemption by swapping or
recomputation. If not specified, we determine the mode as follows:
We use recomputation by default since it incurs lower overhead than
swapping. However, when the sequence group has multiple sequences
@@ -1215,7 +1216,7 @@ def maybe_create_spec_config(
typical_acceptance_sampler_posterior_threshold (Optional[float]):
A threshold value that sets a lower bound on the posterior
probability of a token in the target model for it to be
- accepted. This threshold is used only when we use the
+ accepted. This threshold is used only when we use the
TypicalAcceptanceSampler for token acceptance.
typical_acceptance_sampler_posterior_alpha (Optional[float]):
A scaling factor for the entropy-based threshold in the
@@ -1225,7 +1226,7 @@ def maybe_create_spec_config(
If set to False, token log probabilities are returned
according to the log probability settings in SamplingParams.
If not specified, it defaults to True.
-
+
Returns:
Optional["SpeculativeConfig"]: An instance of SpeculativeConfig if
the necessary conditions are met, else None.
@@ -1470,13 +1471,13 @@ def __init__(
typical_acceptance_sampler_posterior_threshold (Optional[float]):
A threshold value that sets a lower bound on the posterior
probability of a token in the target model for it to be
- accepted. This threshold is used only when we use the
+ accepted. This threshold is used only when we use the
TypicalAcceptanceSampler for token acceptance.
typical_acceptance_sampler_posterior_alpha (Optional[float]):
A scaling factor for the entropy-based threshold in the
TypicalAcceptanceSampler.
disable_logprobs: If set to True, token log probabilities will not
- be returned even if requested by sampling parameters. This
+ be returned even if requested by sampling parameters. This
reduces latency by skipping logprob calculation in proposal
sampling, target sampling, and after accepted tokens are
determined. If set to False, log probabilities will be
@@ -1843,10 +1844,10 @@ def get_min_sliding_window(
def get_served_model_name(model: str,
served_model_name: Optional[Union[str, List[str]]]):
"""
- If the input is a non-empty list, the first model_name in
- `served_model_name` is taken.
- If the input is a non-empty string, it is used directly.
- For cases where the input is either an empty string or an
+ If the input is a non-empty list, the first model_name in
+ `served_model_name` is taken.
+ If the input is a non-empty string, it is used directly.
+ For cases where the input is either an empty string or an
empty list, the fallback is to use `self.model`.
"""
if not served_model_name:
diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py
index 0af7b3386d895..aa546ebada473 100644
--- a/vllm/executor/ray_utils.py
+++ b/vllm/executor/ray_utils.py
@@ -10,7 +10,7 @@
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.sequence import ExecuteModelRequest, IntermediateTensors
-from vllm.utils import get_ip, is_hip
+from vllm.utils import get_ip
from vllm.worker.worker_base import WorkerWrapperBase
logger = init_logger(__name__)
@@ -231,7 +231,7 @@ def initialize_ray_cluster(
assert_ray_available()
# Connect to a ray cluster.
- if is_hip() or current_platform.is_xpu():
+ if current_platform.is_rocm() or current_platform.is_xpu():
ray.init(address=ray_address,
ignore_reinit_error=True,
num_gpus=parallel_config.world_size)
diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py
index 71eed6eb68d78..83910339f3c9f 100644
--- a/vllm/model_executor/custom_op.py
+++ b/vllm/model_executor/custom_op.py
@@ -7,7 +7,7 @@
from vllm.compilation.levels import CompilationLevel
from vllm.logger import init_logger
from vllm.platforms import current_platform
-from vllm.utils import is_hip, print_warning_once
+from vllm.utils import print_warning_once
logger = init_logger(__name__)
@@ -72,7 +72,7 @@ def dispatch_forward(self):
if not enabled:
return self.forward_native
- if is_hip():
+ if current_platform.is_rocm():
return self.forward_hip
elif current_platform.is_cpu():
return self.forward_cpu
diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
index c21aaa40ff2cc..be3d3985a74ad 100644
--- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
+++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
@@ -14,7 +14,8 @@
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
all_close_1d, normalize_e4m3fn_to_e4m3fnuz, per_tensor_dequantize)
from vllm.model_executor.utils import set_weight_attrs
-from vllm.utils import is_hip, print_warning_once
+from vllm.platforms import current_platform
+from vllm.utils import print_warning_once
class GPTQMarlinState(Enum):
@@ -150,7 +151,7 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
layer.w2_input_scale.max(), requires_grad=False)
# If rocm, normalize the weights and scales to e4m3fnuz
- if is_hip():
+ if current_platform.is_rocm():
# Normalize the weights and scales
w13_weight, w13_weight_scale, w13_input_scale = \
normalize_e4m3fn_to_e4m3fnuz(
diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
index 7270b302ef965..73cc8ce0d2a4b 100644
--- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
+++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
@@ -12,7 +12,7 @@
from vllm.model_executor.parameter import (ChannelQuantScaleParameter,
ModelWeightParameter,
PerTensorScaleParameter)
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
__all__ = ["CompressedTensorsW8A8Fp8"]
@@ -40,7 +40,7 @@ def process_weights_after_loading(self, layer) -> None:
logical_widths=layer.logical_widths,
)
- if is_hip():
+ if current_platform.is_rocm():
weight, max_w_scale, input_scale = normalize_e4m3fn_to_e4m3fnuz(
weight=weight,
weight_scale=max_w_scale,
@@ -56,7 +56,7 @@ def process_weights_after_loading(self, layer) -> None:
elif self.strategy == QuantizationStrategy.CHANNEL:
weight = layer.weight
- if is_hip():
+ if current_platform.is_rocm():
weight, weight_scale, input_scale = \
normalize_e4m3fn_to_e4m3fnuz(
weight=weight,
diff --git a/vllm/model_executor/layers/quantization/fbgemm_fp8.py b/vllm/model_executor/layers/quantization/fbgemm_fp8.py
index f26907176ad1a..825d01d1b3551 100644
--- a/vllm/model_executor/layers/quantization/fbgemm_fp8.py
+++ b/vllm/model_executor/layers/quantization/fbgemm_fp8.py
@@ -19,7 +19,6 @@
from vllm.model_executor.parameter import (ChannelQuantScaleParameter,
ModelWeightParameter)
from vllm.platforms import current_platform
-from vllm.utils import is_hip
logger = init_logger(__name__)
@@ -127,7 +126,7 @@ def process_weights_after_loading(self, layer: Module) -> None:
weight = layer.weight
- if is_hip():
+ if current_platform.is_rocm():
weight, weight_scale, input_scale = \
normalize_e4m3fn_to_e4m3fnuz(
weight=weight,
diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py
index b5feb55db0e74..d34579b7099bb 100644
--- a/vllm/model_executor/layers/quantization/fp8.py
+++ b/vllm/model_executor/layers/quantization/fp8.py
@@ -26,7 +26,7 @@
PerTensorScaleParameter)
from vllm.model_executor.utils import set_weight_attrs
from vllm.platforms import current_platform
-from vllm.utils import is_hip, print_warning_once
+from vllm.utils import print_warning_once
ACTIVATION_SCHEMES = ["static", "dynamic"]
@@ -123,7 +123,7 @@ def __init__(self, quant_config: Fp8Config):
self.use_marlin = (not current_platform.has_device_capability(89)
or envs.VLLM_TEST_FORCE_FP8_MARLIN)
# Disable marlin for rocm
- if is_hip():
+ if current_platform.is_rocm():
self.use_marlin = False
def create_weights(
@@ -226,7 +226,7 @@ def process_weights_after_loading(self, layer: Module) -> None:
weight_scale = layer.weight_scale
# If rocm, use float8_e4m3fnuz.
- if is_hip():
+ if current_platform.is_rocm():
weight, weight_scale, input_scale = \
normalize_e4m3fn_to_e4m3fnuz(
weight=weight,
@@ -372,7 +372,7 @@ def process_weights_after_loading(self, layer: Module) -> None:
if not self.quant_config.is_checkpoint_fp8_serialized:
# If rocm, use float8_e4m3fnuz as dtype
fp8_dtype = torch.float8_e4m3fnuz \
- if is_hip() else torch.float8_e4m3fn
+ if current_platform.is_rocm() else torch.float8_e4m3fn
w13_weight = torch.empty_like(layer.w13_weight.data,
dtype=fp8_dtype)
w2_weight = torch.empty_like(layer.w2_weight.data, dtype=fp8_dtype)
@@ -420,7 +420,7 @@ def process_weights_after_loading(self, layer: Module) -> None:
layer.w2_input_scale = torch.nn.Parameter(
layer.w2_input_scale.max(), requires_grad=False)
# If rocm, normalize the weights and scales to e4m3fnuz
- if is_hip():
+ if current_platform.is_rocm():
# Normalize the weights and scales
w13_weight, w13_weight_scale, w13_input_scale = \
normalize_e4m3fn_to_e4m3fnuz(
diff --git a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
index 411af922149fd..1879d2855d93d 100644
--- a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
+++ b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
@@ -4,16 +4,16 @@
from vllm import _custom_ops as ops
from vllm.platforms import current_platform
-from vllm.utils import is_hip
# Input scaling factors are no longer optional in _scaled_mm starting
# from pytorch 2.5. Allocating a dummy tensor to pass as input_scale
-TORCH_DEVICE_IDENTITY = torch.ones(1).cuda() if is_hip() else None
+TORCH_DEVICE_IDENTITY = torch.ones(1).cuda() \
+ if current_platform.is_rocm() else None
def cutlass_fp8_supported() -> bool:
# cutlass is not supported on Rocm
- if is_hip():
+ if current_platform.is_rocm():
return False
capability_tuple = current_platform.get_device_capability()
diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py
index 4126ceb7117d4..22f194c776b69 100644
--- a/vllm/model_executor/models/exaone.py
+++ b/vllm/model_executor/models/exaone.py
@@ -49,9 +49,9 @@
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name)
from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs.exaone import ExaoneConfig
-from vllm.utils import is_hip
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (PPMissingLayer, is_pp_missing_parameter,
@@ -595,7 +595,7 @@ def load_kv_cache_scales(self, quantization_param_path: str) -> None:
if not isinstance(self.transformer.h[layer_idx], nn.Identity):
layer_self_attn = self.transformer.h[layer_idx].attn
- if is_hip():
+ if current_platform.is_rocm():
# The scaling factor convention we are assuming is
# quantized_value * scaling_factor ~= true_value
# which is consistent with the practice of setting
diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py
index 5a397ed8ff6a0..c968817747754 100644
--- a/vllm/model_executor/models/granite.py
+++ b/vllm/model_executor/models/granite.py
@@ -49,8 +49,8 @@
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name)
from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
-from vllm.utils import is_hip
from .interfaces import SupportsLoRA, SupportsPP
from .utils import PPMissingLayer, is_pp_missing_parameter, make_layers
@@ -534,7 +534,7 @@ def load_kv_cache_scales(self, quantization_param_path: str) -> None:
if not isinstance(self.model.layers[layer_idx], nn.Identity):
layer_self_attn = self.model.layers[layer_idx].self_attn
- if is_hip():
+ if current_platform.is_rocm():
# The scaling factor convention we are assuming is
# quantized_value * scaling_factor ~= true_value
# which is consistent with the practice of setting
diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index c346e3e808e3f..b0ca1fe006239 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -50,8 +50,8 @@
default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name)
from vllm.model_executor.pooling_metadata import PoolingMetadata
from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors, PoolerOutput
-from vllm.utils import is_hip
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter,
@@ -423,7 +423,7 @@ def load_kv_cache_scales(self, quantization_param_path: str) -> None:
if not isinstance(self.layers[layer_idx], nn.Identity):
layer_self_attn = self.layers[layer_idx].self_attn
- if is_hip():
+ if current_platform.is_rocm():
# The scaling factor convention we are assuming is
# quantized_value * scaling_factor ~= true_value
# which is consistent with the practice of setting
diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py
index f6713ab0898f0..595a9256f958e 100644
--- a/vllm/model_executor/models/registry.py
+++ b/vllm/model_executor/models/registry.py
@@ -12,7 +12,7 @@
import torch.nn as nn
from vllm.logger import init_logger
-from vllm.utils import is_hip
+from vllm.platforms import current_platform
from .interfaces import (has_inner_state, is_attention_free,
supports_multimodal, supports_pp)
@@ -247,7 +247,7 @@ def _try_load_model_cls(
model_arch: str,
model: _BaseRegisteredModel,
) -> Optional[Type[nn.Module]]:
- if is_hip():
+ if current_platform.is_rocm():
if model_arch in _ROCM_UNSUPPORTED_MODELS:
raise ValueError(f"Model architecture '{model_arch}' is not "
"supported by ROCm for now.")
diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py
index 5a3dd3c02b85b..e3e7ccb5cf179 100644
--- a/vllm/model_executor/models/solar.py
+++ b/vllm/model_executor/models/solar.py
@@ -49,8 +49,8 @@
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name)
from vllm.model_executor.sampling_metadata import SamplingMetadata
+from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
-from vllm.utils import is_hip
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (PPMissingLayer, is_pp_missing_parameter,
@@ -558,7 +558,7 @@ def load_kv_cache_scales(self, quantization_param_path: str) -> None:
if not isinstance(self.model.layers[layer_idx], nn.Identity):
layer_self_attn = self.model.layers[layer_idx].self_attn
- if is_hip():
+ if current_platform.is_rocm():
# The scaling factor convention we are assuming is
# quantized_value * scaling_factor ~= true_value
# which is consistent with the practice of setting
diff --git a/vllm/utils.py b/vllm/utils.py
index d4f2c936ca9cc..c3f9a6bdd8b80 100644
--- a/vllm/utils.py
+++ b/vllm/utils.py
@@ -314,10 +314,6 @@ def reset(self):
self._index = 0
-def is_hip() -> bool:
- return torch.version.hip is not None
-
-
@lru_cache(maxsize=None)
def get_max_shared_memory_bytes(gpu: int = 0) -> int:
"""Returns the maximum shared memory per thread block in bytes."""
@@ -1098,7 +1094,7 @@ def _cuda_device_count_stateless(
if not torch.cuda._is_compiled():
return 0
- if is_hip():
+ if current_platform.is_rocm():
# ROCm uses amdsmi instead of nvml for stateless device count
# This requires a sufficiently modern version of Torch 2.4.0
raw_count = torch.cuda._device_count_amdsmi() if (hasattr(
diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py
index 4a287e3741d0f..233a9e664d845 100644
--- a/vllm/worker/model_runner.py
+++ b/vllm/worker/model_runner.py
@@ -41,6 +41,7 @@
from vllm.model_executor.models.utils import set_cpu_offload_max_bytes
from vllm.multimodal import (MULTIMODAL_REGISTRY, BatchedTensorInputs,
MultiModalInputs, MultiModalRegistry)
+from vllm.platforms import current_platform
from vllm.prompt_adapter.layers import PromptAdapterMapping
from vllm.prompt_adapter.request import PromptAdapterRequest
from vllm.prompt_adapter.worker_manager import (
@@ -49,7 +50,7 @@
from vllm.sequence import IntermediateTensors, SequenceGroupMetadata
from vllm.transformers_utils.config import uses_mrope
from vllm.utils import (DeviceMemoryProfiler, PyObjectCache, async_tensor_h2d,
- flatten_2d_lists, is_hip, is_pin_memory_available,
+ flatten_2d_lists, is_pin_memory_available,
supports_dynamo, weak_ref_tensor)
from vllm.worker.model_runner_base import (
ModelRunnerBase, ModelRunnerInputBase, ModelRunnerInputBuilderBase,
@@ -737,13 +738,13 @@ def _get_cuda_graph_pad_size(self,
family of functions.
Args:
- num_seqs (int): Number of sequences scheduled to run.
+ num_seqs (int): Number of sequences scheduled to run.
max_decode_seq_len (int): Greatest of all the decode sequence
lengths. Used only in checking the viablility of using
CUDA graphs.
max_encoder_seq_len (int, optional): Greatest of all the encode
sequence lengths. Defaults to 0. Used only in checking the
- viability of using CUDA graphs.
+ viability of using CUDA graphs.
Returns:
int: Returns the determined number of padding sequences. If
CUDA graphs is not viable, returns -1.
@@ -1103,7 +1104,7 @@ def load_model(self) -> None:
self.prompt_adapter_manager.create_prompt_adapter_manager(
self.model))
- if self.kv_cache_dtype == "fp8" and is_hip():
+ if self.kv_cache_dtype == "fp8" and current_platform.is_rocm():
# Currently only ROCm accepts kv-cache scaling factors
# via quantization_param_path and this will be deprecated
# in the future.
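
The refactor in this patch is mechanical but worth stating once: the free-standing is_hip() helper in vllm/utils.py is removed, and every call site now asks the platform object instead. A minimal sketch of the new idiom, mirroring the fp8 dtype selection changed above (the helper name select_fp8_dtype is illustrative only, not part of the patch):

    import torch

    from vllm.platforms import current_platform

    def select_fp8_dtype() -> torch.dtype:
        # ROCm uses the e4m3fnuz variant; CUDA keeps e4m3fn
        # (same decision as the fp8.py hunk above).
        if current_platform.is_rocm():
            return torch.float8_e4m3fnuz
        return torch.float8_e4m3fn

Keeping all platform checks behind one interface (is_rocm, is_xpu, is_cpu, ...) is what lets later patches add platform-conditional imports without reaching back into vllm/utils.py.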
From 32176fee733b76b295346870d717d44cb7102944 Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Sun, 27 Oct 2024 21:58:04 -0700
Subject: [PATCH 06/38] [torch.compile] support moe models (#9632)
Signed-off-by: youkaichao
---
benchmarks/kernels/benchmark_moe.py | 33 +++---
tests/compile/test_basic_correctness.py | 4 +-
tests/kernels/test_awq_marlin.py | 21 ++--
tests/kernels/test_moe.py | 7 +-
.../layers/fused_moe/__init__.py | 28 ++++-
.../layers/fused_moe/fused_marlin_moe.py | 51 +++++++--
.../layers/fused_moe/fused_moe.py | 100 ++++++++++++++++--
vllm/model_executor/layers/fused_moe/layer.py | 29 +++--
.../layers/quantization/awq_marlin.py | 7 +-
.../compressed_tensors_moe.py | 7 +-
.../layers/quantization/gptq_marlin.py | 6 +-
vllm/model_executor/models/granitemoe.py | 2 +
12 files changed, 217 insertions(+), 78 deletions(-)
diff --git a/benchmarks/kernels/benchmark_moe.py b/benchmarks/kernels/benchmark_moe.py
index c2ad98b7e2656..4f88e8e6eb1a6 100644
--- a/benchmarks/kernels/benchmark_moe.py
+++ b/benchmarks/kernels/benchmark_moe.py
@@ -88,22 +88,23 @@ def prepare(i: int):
input_gating.copy_(gating_output[i])
def run():
- fused_moe(
- x,
- w1,
- w2,
- input_gating,
- topk,
- renormalize=True,
- inplace=True,
- override_config=config,
- use_fp8_w8a8=use_fp8_w8a8,
- use_int8_w8a16=use_int8_w8a16,
- w1_scale=w1_scale,
- w2_scale=w2_scale,
- a1_scale=a1_scale,
- a2_scale=a2_scale,
- )
+ from vllm.model_executor.layers.fused_moe import override_config
+ with override_config(config):
+ fused_moe(
+ x,
+ w1,
+ w2,
+ input_gating,
+ topk,
+ renormalize=True,
+ inplace=True,
+ use_fp8_w8a8=use_fp8_w8a8,
+ use_int8_w8a16=use_int8_w8a16,
+ w1_scale=w1_scale,
+ w2_scale=w2_scale,
+ a1_scale=a1_scale,
+ a2_scale=a2_scale,
+ )
# JIT compilation & warmup
run()
diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py
index 77c56d91d0a8b..6aa27b24b4a6e 100644
--- a/tests/compile/test_basic_correctness.py
+++ b/tests/compile/test_basic_correctness.py
@@ -13,11 +13,11 @@
@pytest.mark.parametrize(
"model, model_args, pp_size, tp_size, attn_backend, method, fullgraph",
[
- ("meta-llama/Llama-3.2-1B", [], 2, 2, "FLASH_ATTN", "generate", True),
+ ("meta-llama/Llama-3.2-1B", [], 2, 2, "FLASHINFER", "generate", True),
("nm-testing/Meta-Llama-3-8B-Instruct-W8A8-Dyn-Per-Token-2048-Samples",
["--quantization", "compressed-tensors"
], 1, 1, "FLASH_ATTN", "generate", True),
- ("google/gemma-2-2b-it", [], 1, 2, "FLASHINFER", "generate", True),
+ ("ibm/PowerMoE-3b", [], 1, 2, "FLASH_ATTN", "generate", True),
# TODO: add multi-modality test for llava
("llava-hf/llava-1.5-7b-hf", [], 2, 1, "FLASHINFER", "generate", False)
])
diff --git a/tests/kernels/test_awq_marlin.py b/tests/kernels/test_awq_marlin.py
index 0f0a2b24563fd..59917dd2c58ad 100644
--- a/tests/kernels/test_awq_marlin.py
+++ b/tests/kernels/test_awq_marlin.py
@@ -5,11 +5,10 @@
import pytest
import torch
+import vllm.model_executor.layers.fused_moe # noqa
from tests.kernels.utils import (compute_max_diff, stack_and_dev, torch_moe,
torch_moe_single)
from vllm import _custom_ops as ops
-from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe, single_marlin_moe)
from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk
from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
awq_marlin_quantize)
@@ -81,7 +80,7 @@ def test_fused_marlin_moe_awq(
score = torch.randn((m, e), device="cuda", dtype=dtype)
topk_weights, topk_ids = fused_topk(a, score, topk, False)
- marlin_output = fused_marlin_moe(
+ marlin_output = torch.ops.vllm.fused_marlin_moe(
a,
qweight1,
qweight2,
@@ -150,14 +149,14 @@ def test_single_marlin_moe_multiply_awq(
score = torch.randn((m, e), device="cuda", dtype=dtype)
- marlin_output = single_marlin_moe(a,
- qweight,
- scales,
- score,
- topk,
- renormalize=False,
- w_zeros=zp,
- num_bits=num_bits)
+ marlin_output = torch.ops.vllm.single_marlin_moe(a,
+ qweight,
+ scales,
+ score,
+ topk,
+ renormalize=False,
+ w_zeros=zp,
+ num_bits=num_bits)
torch_output = torch_moe_single(a, w_ref.transpose(1, 2), score, topk)
diff --git a/tests/kernels/test_moe.py b/tests/kernels/test_moe.py
index 4bfc089c82179..70906ab2187bc 100644
--- a/tests/kernels/test_moe.py
+++ b/tests/kernels/test_moe.py
@@ -7,12 +7,11 @@
from transformers import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
+import vllm.model_executor.layers.fused_moe # noqa
from tests.kernels.utils import (compute_max_diff, opcheck, stack_and_dev,
torch_moe, torch_moe_single)
from vllm import _custom_ops as ops
from vllm.model_executor.layers.fused_moe import fused_moe
-from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe, single_marlin_moe)
from vllm.model_executor.layers.fused_moe.fused_moe import (
fused_topk, moe_align_block_size)
from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
@@ -193,7 +192,7 @@ def test_fused_marlin_moe(
topk,
renormalize=False,
)
- marlin_output = fused_marlin_moe(
+ marlin_output = torch.ops.vllm.fused_marlin_moe(
a,
qweight1,
qweight2,
@@ -309,7 +308,7 @@ def test_single_marlin_moe_multiply(
sort_indices = stack_and_dev(sort_indices_l)
score = torch.randn((m, e), device="cuda", dtype=dtype)
- marlin_output = single_marlin_moe(
+ marlin_output = torch.ops.vllm.single_marlin_moe(
a,
qweight,
scales,
diff --git a/vllm/model_executor/layers/fused_moe/__init__.py b/vllm/model_executor/layers/fused_moe/__init__.py
index e9b5703ca28be..c4223d12600ac 100644
--- a/vllm/model_executor/layers/fused_moe/__init__.py
+++ b/vllm/model_executor/layers/fused_moe/__init__.py
@@ -1,23 +1,43 @@
+from contextlib import contextmanager
+from typing import Any, Dict, Optional
+
from vllm.model_executor.layers.fused_moe.layer import (
FusedMoE, FusedMoEMethodBase, FusedMoeWeightScaleSupported)
from vllm.triton_utils import HAS_TRITON
+_config: Optional[Dict[str, Any]] = None
+
+
+@contextmanager
+def override_config(config):
+ global _config
+ old_config = _config
+ _config = config
+ yield
+ _config = old_config
+
+
+def get_config() -> Optional[Dict[str, Any]]:
+ return _config
+
+
__all__ = [
"FusedMoE",
"FusedMoEMethodBase",
"FusedMoeWeightScaleSupported",
+ "override_config",
+ "get_config",
]
if HAS_TRITON:
- from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe, single_marlin_moe)
+ # import to register the custom ops
+ import vllm.model_executor.layers.fused_moe.fused_marlin_moe # noqa
+ import vllm.model_executor.layers.fused_moe.fused_moe # noqa
from vllm.model_executor.layers.fused_moe.fused_moe import (
fused_experts, fused_moe, fused_topk, get_config_file_name,
grouped_topk)
__all__ += [
- "fused_marlin_moe",
- "single_marlin_moe",
"fused_moe",
"fused_topk",
"fused_experts",
diff --git a/vllm/model_executor/layers/fused_moe/fused_marlin_moe.py b/vllm/model_executor/layers/fused_moe/fused_marlin_moe.py
index 5ae40a2af5a2b..93019d0d0abb6 100644
--- a/vllm/model_executor/layers/fused_moe/fused_marlin_moe.py
+++ b/vllm/model_executor/layers/fused_moe/fused_marlin_moe.py
@@ -1,6 +1,6 @@
"""Fused MoE utilities for GPTQ."""
import functools
-from typing import Any, Dict, Optional
+from typing import Optional
import torch
@@ -18,6 +18,7 @@ def get_scalar_type(num_bits: int, has_zp: bool):
return scalar_types.uint4b8 if num_bits == 4 else scalar_types.uint8b128
+@torch.library.custom_op("vllm::single_marlin_moe", mutates_args=[])
def single_marlin_moe(
hidden_states: torch.Tensor,
w: torch.Tensor,
@@ -28,7 +29,6 @@ def single_marlin_moe(
g_idx: Optional[torch.Tensor] = None,
sort_indices: Optional[torch.Tensor] = None,
w_zeros: Optional[torch.Tensor] = None,
- override_config: Optional[Dict[str, Any]] = None,
num_bits: int = 8,
is_k_full: bool = True,
) -> torch.Tensor:
@@ -49,8 +49,6 @@ def single_marlin_moe(
- topk (int): The number of top-k experts to select.
- renormalize (bool): If True, renormalize the top-k weights to sum to 1.
- w_zeros (Optional[torch.Tensor]): Optional zero points to be used for w.
- - override_config (Optional[Dict[str, Any]]): Optional override
- for the kernel configuration.
- num_bits (bool): The number of bits in expert weights quantization.
Returns:
@@ -79,7 +77,6 @@ def single_marlin_moe(
w.shape,
topk_ids.shape[1],
None,
- override_config=override_config,
is_marlin=True)
config = get_config_func(M)
@@ -122,6 +119,24 @@ def single_marlin_moe(
return torch.sum(intermediate_cache.view(*intermediate_cache.shape), dim=1)
+@single_marlin_moe.register_fake
+def _(
+ hidden_states: torch.Tensor,
+ w: torch.Tensor,
+ scales: torch.Tensor,
+ gating_output: torch.Tensor,
+ topk: int,
+ renormalize: bool,
+ g_idx: Optional[torch.Tensor] = None,
+ sort_indices: Optional[torch.Tensor] = None,
+ w_zeros: Optional[torch.Tensor] = None,
+ num_bits: int = 8,
+ is_k_full: bool = True,
+) -> torch.Tensor:
+ return torch.empty_like(hidden_states)
+
+
+@torch.library.custom_op("vllm::fused_marlin_moe", mutates_args=[])
def fused_marlin_moe(
hidden_states: torch.Tensor,
w1: torch.Tensor,
@@ -137,7 +152,6 @@ def fused_marlin_moe(
sort_indices2: Optional[torch.Tensor] = None,
w1_zeros: Optional[torch.Tensor] = None,
w2_zeros: Optional[torch.Tensor] = None,
- override_config: Optional[Dict[str, Any]] = None,
num_bits: int = 8,
is_k_full: bool = True,
) -> torch.Tensor:
@@ -161,8 +175,6 @@ def fused_marlin_moe(
permutation.
- topk_weights (torch.Tensor): Top-k weights.
- topk_ids (torch.Tensor): Indices of topk-k elements.
- - override_config (Optional[Dict[str, Any]]): Optional override
- for the kernel configuration.
- w1_zeros (Optional[torch.Tensor]): Optional zero points to be used for w1.
- w2_zeros (Optional[torch.Tensor]): Optional zero points to be used for w2.
- num_bits (bool): The number of bits in expert weights quantization.
@@ -209,7 +221,6 @@ def fused_marlin_moe(
w2.shape,
topk_ids.shape[1],
None,
- override_config=override_config,
is_marlin=True,
)
config = get_config_func(M)
@@ -311,3 +322,25 @@ def fused_marlin_moe(
return torch.sum(intermediate_cache3.view(*intermediate_cache3.shape),
dim=1)
+
+
+@fused_marlin_moe.register_fake
+def _(
+ hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ w1_scale: torch.Tensor,
+ w2_scale: torch.Tensor,
+ gating_output: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ g_idx1: Optional[torch.Tensor] = None,
+ g_idx2: Optional[torch.Tensor] = None,
+ sort_indices1: Optional[torch.Tensor] = None,
+ sort_indices2: Optional[torch.Tensor] = None,
+ w1_zeros: Optional[torch.Tensor] = None,
+ w2_zeros: Optional[torch.Tensor] = None,
+ num_bits: int = 8,
+ is_k_full: bool = True,
+) -> torch.Tensor:
+ return torch.empty_like(hidden_states)
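
Registering these kernels through torch.library.custom_op is what makes them composable with torch.compile: Dynamo treats the op as an opaque call, and the register_fake implementation gives the compiler output shapes and dtypes without running any GPU code. A stripped-down sketch of the same pattern with a deliberately hypothetical op (vllm::toy_moe is not part of this patch):

    import torch

    @torch.library.custom_op("vllm::toy_moe", mutates_args=[])
    def toy_moe(hidden_states: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        # Real (eager) implementation; torch.compile never traces into it.
        return hidden_states @ w.t()

    @toy_moe.register_fake
    def _(hidden_states: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        # Fake/meta implementation: shapes and dtypes only, no compute.
        return hidden_states.new_empty(hidden_states.shape[0], w.shape[0])

    # The op is reachable under torch.ops and can sit inside a compiled graph.
    fn = torch.compile(lambda x, w: torch.ops.vllm.toy_moe(x, w))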
diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py
index 90a4209b5bce5..1cf5c2253ca0b 100644
--- a/vllm/model_executor/layers/fused_moe/fused_moe.py
+++ b/vllm/model_executor/layers/fused_moe/fused_moe.py
@@ -358,9 +358,10 @@ def try_get_optimal_moe_config(
top_k: int,
dtype: Optional[str],
M: int,
- override_config: Optional[Dict[str, Any]] = None,
is_marlin: bool = False,
):
+ from vllm.model_executor.layers.fused_moe import get_config
+ override_config = get_config()
if override_config:
config = override_config
else:
@@ -465,19 +466,109 @@ def get_config_dtype_str(dtype: torch.dtype,
return None
+@torch.library.custom_op("vllm::inplace_fused_experts",
+ mutates_args=["hidden_states"])
+def inplace_fused_experts(hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ use_fp8_w8a8: bool = False,
+ use_int8_w8a16: bool = False,
+ w1_scale: Optional[torch.Tensor] = None,
+ w2_scale: Optional[torch.Tensor] = None,
+ a1_scale: Optional[torch.Tensor] = None,
+ a2_scale: Optional[torch.Tensor] = None) -> None:
+ fused_experts_impl(hidden_states, w1, w2, topk_weights, topk_ids, True,
+ use_fp8_w8a8, use_int8_w8a16, w1_scale, w2_scale,
+ a1_scale, a2_scale)
+
+
+@inplace_fused_experts.register_fake
+def _(hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ use_fp8_w8a8: bool = False,
+ use_int8_w8a16: bool = False,
+ w1_scale: Optional[torch.Tensor] = None,
+ w2_scale: Optional[torch.Tensor] = None,
+ a1_scale: Optional[torch.Tensor] = None,
+ a2_scale: Optional[torch.Tensor] = None) -> None:
+ pass
+
+
+@torch.library.custom_op("vllm::outplace_fused_experts", mutates_args=[])
+def outplace_fused_experts(
+ hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ use_fp8_w8a8: bool = False,
+ use_int8_w8a16: bool = False,
+ w1_scale: Optional[torch.Tensor] = None,
+ w2_scale: Optional[torch.Tensor] = None,
+ a1_scale: Optional[torch.Tensor] = None,
+ a2_scale: Optional[torch.Tensor] = None) -> torch.Tensor:
+ return fused_experts_impl(hidden_states, w1, w2, topk_weights, topk_ids,
+ False, use_fp8_w8a8, use_int8_w8a16, w1_scale,
+ w2_scale, a1_scale, a2_scale)
+
+
+@outplace_fused_experts.register_fake
+def _(hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ use_fp8_w8a8: bool = False,
+ use_int8_w8a16: bool = False,
+ w1_scale: Optional[torch.Tensor] = None,
+ w2_scale: Optional[torch.Tensor] = None,
+ a1_scale: Optional[torch.Tensor] = None,
+ a2_scale: Optional[torch.Tensor] = None) -> torch.Tensor:
+ return torch.empty_like(hidden_states)
+
+
def fused_experts(hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
inplace: bool = False,
- override_config: Optional[Dict[str, Any]] = None,
use_fp8_w8a8: bool = False,
use_int8_w8a16: bool = False,
w1_scale: Optional[torch.Tensor] = None,
w2_scale: Optional[torch.Tensor] = None,
a1_scale: Optional[torch.Tensor] = None,
a2_scale: Optional[torch.Tensor] = None):
+ if inplace:
+ torch.ops.vllm.inplace_fused_experts(hidden_states, w1, w2,
+ topk_weights, topk_ids,
+ use_fp8_w8a8, use_int8_w8a16,
+ w1_scale, w2_scale, a1_scale,
+ a2_scale)
+ return hidden_states
+ else:
+ return torch.ops.vllm.outplace_fused_experts(
+ hidden_states, w1, w2, topk_weights, topk_ids, use_fp8_w8a8,
+ use_int8_w8a16, w1_scale, w2_scale, a1_scale, a2_scale)
+
+
+def fused_experts_impl(hidden_states: torch.Tensor,
+ w1: torch.Tensor,
+ w2: torch.Tensor,
+ topk_weights: torch.Tensor,
+ topk_ids: torch.Tensor,
+ inplace: bool = False,
+ use_fp8_w8a8: bool = False,
+ use_int8_w8a16: bool = False,
+ w1_scale: Optional[torch.Tensor] = None,
+ w2_scale: Optional[torch.Tensor] = None,
+ a1_scale: Optional[torch.Tensor] = None,
+ a2_scale: Optional[torch.Tensor] = None):
# Check constraints.
assert hidden_states.shape[1] == w1.shape[2], "Hidden size mismatch"
assert topk_weights.shape == topk_ids.shape, "topk shape mismatch"
@@ -504,7 +595,6 @@ def fused_experts(hidden_states: torch.Tensor,
w2.shape,
topk_ids.shape[1],
config_dtype,
- override_config=override_config,
)
config = get_config_func(M)
@@ -602,7 +692,6 @@ def fused_moe(
topk: int,
renormalize: bool,
inplace: bool = False,
- override_config: Optional[Dict[str, Any]] = None,
use_grouped_topk: bool = False,
num_expert_group: Optional[int] = None,
topk_group: Optional[int] = None,
@@ -628,8 +717,6 @@ def fused_moe(
- renormalize (bool): If True, renormalize the top-k weights to sum to 1.
- inplace (bool): If True, perform the operation in-place.
Defaults to False.
- - override_config (Optional[Dict[str, Any]]): Optional override
- for the kernel configuration.
- num_expert_group: Optional[int]: additional parameter for grouped_topk
- topk_group: Optional[int]: additional parameter for grouped_topk
- use_grouped_topk: If True, use grouped_topk instead of fused_topk
@@ -667,7 +754,6 @@ def fused_moe(
topk_weights,
topk_ids,
inplace=inplace,
- override_config=override_config,
use_fp8_w8a8=use_fp8_w8a8,
use_int8_w8a16=use_int8_w8a16,
w1_scale=w1_scale,
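
The inplace flag could not stay on a single custom op because torch.library requires mutated arguments to be declared up front; hence the split into vllm::inplace_fused_experts (which mutates hidden_states) and vllm::outplace_fused_experts, with fused_experts reduced to a thin Python dispatcher. From a caller's point of view nothing changes except that override_config is gone. A minimal sketch, reusing the illustrative shapes from the override_config example above:

    import torch

    from vllm.model_executor.layers.fused_moe import fused_experts, fused_topk

    M, K, N, E = 16, 128, 256, 8
    x = torch.randn(M, K, device="cuda", dtype=torch.float16)
    w1 = torch.randn(E, 2 * N, K, device="cuda", dtype=torch.float16)
    w2 = torch.randn(E, K, N, device="cuda", dtype=torch.float16)
    gating = torch.randn(M, E, device="cuda", dtype=torch.float32)

    topk_weights, topk_ids = fused_topk(x, gating, topk=2, renormalize=True)

    # inplace=True routes to torch.ops.vllm.inplace_fused_experts and hands
    # back the mutated input; inplace=False routes to the out-of-place op
    # and allocates a fresh output tensor.
    out = fused_experts(x, w1, w2, topk_weights, topk_ids, inplace=True)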
diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 8dd36620e3fa0..5570771ac917b 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -12,7 +12,16 @@
from vllm.model_executor.layers.quantization.base_config import (
QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.utils import set_weight_attrs
-
+from vllm.platforms import current_platform
+
+if current_platform.is_cuda_alike():
+ from .fused_moe import fused_experts
+else:
+ fused_experts = None # type: ignore
+if current_platform.is_tpu():
+ from .moe_pallas import fused_moe as fused_moe_pallas
+else:
+ fused_moe_pallas = None # type: ignore
logger = init_logger(__name__)
@@ -96,9 +105,6 @@ def forward_cuda(
num_expert_group: Optional[int] = None,
custom_routing_function: Optional[Callable] = None
) -> torch.Tensor:
- from vllm.model_executor.layers.fused_moe.fused_moe import (
- fused_experts)
-
topk_weights, topk_ids = FusedMoE.select_experts(
hidden_states=x,
router_logits=router_logits,
@@ -132,17 +138,18 @@ def forward_tpu(
num_expert_group: Optional[int] = None,
custom_routing_function: Optional[Callable] = None
) -> torch.Tensor:
- from vllm.model_executor.layers.fused_moe.moe_pallas import fused_moe
assert not use_grouped_topk
assert num_expert_group is None
assert topk_group is None
assert custom_routing_function is None
- return fused_moe(hidden_states=x,
- w1=layer.w13_weight,
- w2=layer.w2_weight,
- topk=top_k,
- gating_output=router_logits,
- renormalize=renormalize)
+ return fused_moe_pallas(hidden_states=x,
+ w1=layer.w13_weight,
+ w2=layer.w2_weight,
+ topk=top_k,
+ gating_output=router_logits,
+ renormalize=renormalize)
+
+ forward_native = forward_cuda
class FusedMoE(torch.nn.Module):
diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py
index b3d93b285769c..95ec12daeeeb5 100644
--- a/vllm/model_executor/layers/quantization/awq_marlin.py
+++ b/vllm/model_executor/layers/quantization/awq_marlin.py
@@ -3,6 +3,7 @@
import torch
from torch.nn import Parameter
+import vllm.model_executor.layers.fused_moe # noqa
from vllm import _custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.layer import (
@@ -435,10 +436,6 @@ def apply(
topk_group: Optional[int] = None,
custom_routing_function: Optional[Callable] = None,
) -> torch.Tensor:
-
- from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe)
-
topk_weights, topk_ids = FusedMoE.select_experts(
hidden_states=x,
router_logits=router_logits,
@@ -449,7 +446,7 @@ def apply(
num_expert_group=num_expert_group,
custom_routing_function=custom_routing_function)
- return fused_marlin_moe(
+ return torch.ops.vllm.fused_marlin_moe(
x,
layer.w13_qweight,
layer.w2_qweight,
diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
index be3d3985a74ad..dad04017d3212 100644
--- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
+++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py
@@ -6,6 +6,7 @@
from compressed_tensors import CompressionFormat
from compressed_tensors.quantization import QuantizationStrategy
+import vllm.model_executor.layers.fused_moe # noqa
from vllm import _custom_ops as ops
from vllm.model_executor.layers.fused_moe import (FusedMoE, FusedMoEMethodBase,
FusedMoeWeightScaleSupported)
@@ -481,10 +482,6 @@ def apply(
topk_group: Optional[int] = None,
custom_routing_function: Optional[Callable] = None,
) -> torch.Tensor:
-
- from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe)
-
topk_weights, topk_ids = FusedMoE.select_experts(
hidden_states=x,
router_logits=router_logits,
@@ -495,7 +492,7 @@ def apply(
num_expert_group=num_expert_group,
custom_routing_function=custom_routing_function)
- return fused_marlin_moe(
+ return torch.ops.vllm.fused_marlin_moe(
x,
layer.w13_weight_packed,
layer.w2_weight_packed,
diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py
index e77191796bd7e..b97dd108d6785 100644
--- a/vllm/model_executor/layers/quantization/gptq_marlin.py
+++ b/vllm/model_executor/layers/quantization/gptq_marlin.py
@@ -2,6 +2,7 @@
import torch
+import vllm.model_executor.layers.fused_moe # noqa
from vllm import _custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.layer import (
@@ -536,9 +537,6 @@ def apply(
topk_group: Optional[int] = None,
custom_routing_function: Optional[Callable] = None,
) -> torch.Tensor:
- from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
- fused_marlin_moe)
-
# The input must currently be float16
orig_dtype = x.dtype
x = x.half()
@@ -553,7 +551,7 @@ def apply(
num_expert_group=num_expert_group,
custom_routing_function=None)
- return fused_marlin_moe(
+ return torch.ops.vllm.fused_marlin_moe(
x,
layer.w13_qweight,
layer.w2_qweight,
diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py
index fd0d4c89a28fe..5307bb21adb96 100644
--- a/vllm/model_executor/models/granitemoe.py
+++ b/vllm/model_executor/models/granitemoe.py
@@ -28,6 +28,7 @@
from transformers.models.granitemoe import GraniteMoeConfig
from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, LoRAConfig
from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
from vllm.model_executor.layers.fused_moe import FusedMoE
@@ -244,6 +245,7 @@ def forward(
return hidden_states
+@support_torch_compile
class GraniteMoeModel(nn.Module):
def __init__(
From feb92fbe4ab6803527df48658a87ebd00b99969f Mon Sep 17 00:00:00 2001
From: Robert Shaw
<114415538+robertgshaw2-neuralmagic@users.noreply.github.com>
Date: Mon, 28 Oct 2024 02:59:37 -0400
Subject: [PATCH 07/38] Fix beam search eos (#9627)
---
vllm/engine/protocol.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/vllm/engine/protocol.py b/vllm/engine/protocol.py
index 5c504e0f0217d..b00dd136d4a47 100644
--- a/vllm/engine/protocol.py
+++ b/vllm/engine/protocol.py
@@ -140,7 +140,12 @@ async def beam_search(
best_beams = sorted_completed[:beam_width]
for beam in best_beams:
- beam.text = tokenizer.decode(beam.tokens[tokenized_length:])
+ if (beam.tokens[-1] == tokenizer.eos_token_id and not ignore_eos):
+ # Skip the eos token in the text.
+ tokens = beam.tokens[tokenized_length:-1]
+ else:
+ tokens = beam.tokens[tokenized_length:]
+ beam.text = tokenizer.decode(tokens)
beam_search_output = RequestOutput(
request_id=request_id,
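
The fix only changes which token ids get decoded into beam.text; the eos id itself stays in beam.tokens. A small worked illustration with made-up values (hypothetical ids, not taken from the patch):

    eos_token_id = 2
    ignore_eos = False
    tokenized_length = 3                 # number of prompt tokens
    beam_tokens = [5, 9, 11, 42, 7, 2]   # prompt + generated ids, ends in eos

    if beam_tokens[-1] == eos_token_id and not ignore_eos:
        tokens = beam_tokens[tokenized_length:-1]   # drop the trailing eos
    else:
        tokens = beam_tokens[tokenized_length:]

    assert tokens == [42, 7]  # only these ids reach tokenizer.decode()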
From 2adb4409e0359039135b5aa6501994da12aa5a26 Mon Sep 17 00:00:00 2001
From: Yan Ma
Date: Mon, 28 Oct 2024 15:13:03 +0800
Subject: [PATCH 08/38] [Bugfix] Fix ray instance detect issue (#9439)
---
vllm/executor/ray_utils.py | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py
index aa546ebada473..993d279890820 100644
--- a/vllm/executor/ray_utils.py
+++ b/vllm/executor/ray_utils.py
@@ -232,9 +232,16 @@ def initialize_ray_cluster(
# Connect to a ray cluster.
if current_platform.is_rocm() or current_platform.is_xpu():
- ray.init(address=ray_address,
- ignore_reinit_error=True,
- num_gpus=parallel_config.world_size)
+            # Try to connect to an existing Ray instance; create a new one if not found
+ try:
+ ray.init("auto")
+ except ConnectionError:
+ logger.warning(
+ "No existing RAY instance detected. "
+ "A new instance will be launched with current node resources.")
+ ray.init(address=ray_address,
+ ignore_reinit_error=True,
+ num_gpus=parallel_config.world_size)
else:
ray.init(address=ray_address, ignore_reinit_error=True)
From 8b0e4f2ad7b5a3ddd6d61acbe8ceb50b4ea3c309 Mon Sep 17 00:00:00 2001
From: Russell Bryant
Date: Mon, 28 Oct 2024 12:38:09 -0400
Subject: [PATCH 09/38] [CI/Build] Adopt Mergify for auto-labeling PRs (#9259)
Signed-off-by: Russell Bryant
---
.github/mergify.yml | 57 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 .github/mergify.yml
diff --git a/.github/mergify.yml b/.github/mergify.yml
new file mode 100644
index 0000000000000..2a3dee7c662d1
--- /dev/null
+++ b/.github/mergify.yml
@@ -0,0 +1,57 @@
+pull_request_rules:
+- name: label-documentation
+ description: Automatically apply documentation label
+ conditions:
+ - or:
+ - files~=^[^/]+\.md$
+ - files~=^docs/
+ actions:
+ label:
+ add:
+ - documentation
+
+- name: label-ci-build
+ description: Automatically apply ci/build label
+ conditions:
+ - files~=^\.github/
+ - files~=\.buildkite/
+ - files~=^cmake/
+ - files=CMakeLists.txt
+ - files~=^Dockerfile
+ - files~=^requirements.*\.txt
+ - files=setup.py
+ actions:
+ label:
+ add:
+ - ci/build
+
+- name: label-frontend
+ description: Automatically apply frontend label
+ conditions:
+ - files~=^vllm/entrypoints/
+ actions:
+ label:
+ add:
+ - frontend
+
+- name: ping author on conflicts and add 'needs-rebase' label
+ conditions:
+ - conflict
+ - -closed
+ actions:
+ label:
+ add:
+ - needs-rebase
+ comment:
+ message: |
+ This pull request has merge conflicts that must be resolved before it can be
+ merged. @{{author}} please rebase it. https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork
+
+- name: remove 'needs-rebase' label when conflict is resolved
+ conditions:
+ - -conflict
+ - -closed
+ actions:
+ label:
+ remove:
+ - needs-rebase
From 5f8d8075f957d5376b2f1cc451e35a2a757e95a5 Mon Sep 17 00:00:00 2001
From: litianjian <45817262+litianjian@users.noreply.github.com>
Date: Tue, 29 Oct 2024 02:04:10 +0800
Subject: [PATCH 10/38] [Model][VLM] Add multi-video support for
LLaVA-Onevision (#8905)
Co-authored-by: litianjian
Co-authored-by: DarkLight1337
---
.../vision_language/test_llava_onevision.py | 173 +++++-------------
vllm/model_executor/models/clip.py | 4 +-
vllm/model_executor/models/llava_onevision.py | 94 +++++++---
vllm/model_executor/models/siglip.py | 4 +-
vllm/multimodal/video.py | 10 +-
5 files changed, 123 insertions(+), 162 deletions(-)
diff --git a/tests/models/decoder_only/vision_language/test_llava_onevision.py b/tests/models/decoder_only/vision_language/test_llava_onevision.py
index 367f25f446279..1616fd299b9aa 100644
--- a/tests/models/decoder_only/vision_language/test_llava_onevision.py
+++ b/tests/models/decoder_only/vision_language/test_llava_onevision.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Tuple, Type, overload
+from typing import List, Optional, Tuple, Type
import pytest
from transformers import (AutoConfig, AutoModelForVision2Seq, AutoTokenizer,
@@ -9,9 +9,8 @@
from vllm.sequence import SampleLogprobs
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE
-from ....conftest import (VIDEO_ASSETS, HfRunner, PromptImageInput, VllmRunner,
- _VideoAssets)
-from ....utils import large_gpu_test
+from ....conftest import (VIDEO_ASSETS, HfRunner, PromptImageInput,
+ PromptVideoInput, VllmRunner)
from ...utils import check_logprobs_close
# Video test
@@ -20,7 +19,7 @@
"<|im_start|>user\n
+---
+
+**vLLM x Snowflake Meetup (Wednesday, November 13th, 5:30-8PM PT) at Snowflake HQ, San Mateo**
+
+We are excited to announce the last in-person vLLM meetup of the year!
+Join the vLLM developers and engineers from Snowflake AI Research to chat about the latest LLM inference optimizations and your 2025 vLLM wishlist!
+Register [here](https://lu.ma/h0qvrajz) and be a part of the event!
+
+---
+
*Latest News* 🔥
-- [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there!
+- [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there!
- [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://raysummit.anyscale.com/flow/anyscale/raysummit2024/landing/page/sessioncatalog?tab.day=20241001&search.sessiontracks=1719251906298001uzJ2) from other vLLM contributors and users!
- [2024/09] We hosted [the sixth vLLM meetup](https://lu.ma/87q3nvnh) with NVIDIA! Please find the meetup slides [here](https://docs.google.com/presentation/d/1wrLGwytQfaOTd5wCGSPNhoaW3nq0E-9wqyP7ny93xRs/edit?usp=sharing).
- [2024/07] We hosted [the fifth vLLM meetup](https://lu.ma/lp0gyjqr) with AWS! Please find the meetup slides [here](https://docs.google.com/presentation/d/1RgUD8aCfcHocghoP3zmXzck9vX3RCI9yfUAB2Bbcl4Y/edit?usp=sharing).
@@ -42,7 +52,7 @@ vLLM is fast with:
- Speculative decoding
- Chunked prefill
-**Performance benchmark**: We include a performance benchmark at the end of [our blog post](https://blog.vllm.ai/2024/09/05/perf-update.html). It compares the performance of vLLM against other LLM serving engines ([TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), [SGLang](https://github.com/sgl-project/sglang) and [LMDeploy](https://github.com/InternLM/lmdeploy)). The implementation is under [nightly-benchmarks folder](.buildkite/nightly-benchmarks/) and you can [reproduce](https://github.com/vllm-project/vllm/issues/8176) this benchmark using our one-click runnable script.
+**Performance benchmark**: We include a performance benchmark at the end of [our blog post](https://blog.vllm.ai/2024/09/05/perf-update.html). It compares the performance of vLLM against other LLM serving engines ([TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), [SGLang](https://github.com/sgl-project/sglang) and [LMDeploy](https://github.com/InternLM/lmdeploy)). The implementation is under [nightly-benchmarks folder](.buildkite/nightly-benchmarks/) and you can [reproduce](https://github.com/vllm-project/vllm/issues/8176) this benchmark using our one-click runnable script.
vLLM is flexible and easy to use with:
From bc73e9821cb4f90a88c04e7d550f132d8911266b Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Tue, 29 Oct 2024 19:02:59 -0400
Subject: [PATCH 32/38] [Bugfix] Fix prefix strings for quantized VLMs (#9772)
---
vllm/model_executor/model_loader/loader.py | 11 +++-
vllm/model_executor/models/blip2.py | 5 +-
vllm/model_executor/models/gemma.py | 58 +++++++++++++------
vllm/model_executor/models/internlm2.py | 56 ++++++++++++------
vllm/model_executor/models/internlm2_ve.py | 16 +++--
vllm/model_executor/models/internvl.py | 5 +-
vllm/model_executor/models/llama.py | 7 ++-
vllm/model_executor/models/llava.py | 20 +++++--
vllm/model_executor/models/llava_next.py | 10 +++-
.../model_executor/models/llava_next_video.py | 10 +++-
vllm/model_executor/models/llava_onevision.py | 10 +++-
vllm/model_executor/models/minicpmv.py | 34 ++++++++---
vllm/model_executor/models/opt.py | 34 ++++++++---
vllm/model_executor/models/paligemma.py | 7 ++-
vllm/model_executor/models/phi3v.py | 19 ++++--
vllm/model_executor/models/pixtral.py | 5 +-
vllm/model_executor/models/qwen2.py | 50 +++++++++++-----
vllm/model_executor/models/qwen2_vl.py | 8 ++-
vllm/model_executor/models/ultravox.py | 5 +-
vllm/model_executor/models/utils.py | 15 +++++
20 files changed, 288 insertions(+), 97 deletions(-)
diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py
index 3cfee13b9fa6e..3ae8a51859f70 100644
--- a/vllm/model_executor/model_loader/loader.py
+++ b/vllm/model_executor/model_loader/loader.py
@@ -147,15 +147,20 @@ def _get_model_initialization_kwargs(
return extra_kwargs
-def build_model(model_class: Type[nn.Module], hf_config: PretrainedConfig,
+def build_model(model_class: Type[nn.Module],
+ hf_config: PretrainedConfig,
cache_config: Optional[CacheConfig],
- quant_config: Optional[QuantizationConfig], *,
+ quant_config: Optional[QuantizationConfig],
+ *,
lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig],
- scheduler_config: Optional[SchedulerConfig]) -> nn.Module:
+ scheduler_config: Optional[SchedulerConfig],
+ prefix: Optional[str] = None) -> nn.Module:
extra_kwargs = _get_model_initialization_kwargs(model_class, lora_config,
multimodal_config,
scheduler_config)
+ if prefix:
+ extra_kwargs["prefix"] = prefix
return model_class(config=hf_config,
cache_config=cache_config,
diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py
index cd2013e91514d..c3b3cc8a4ddb6 100644
--- a/vllm/model_executor/models/blip2.py
+++ b/vllm/model_executor/models/blip2.py
@@ -507,7 +507,10 @@ def __init__(self,
)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.make_empty_intermediate_tensors = (
self.language_model.make_empty_intermediate_tensors)
diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py
index 436bd45d53f35..57b2b43c82f89 100644
--- a/vllm/model_executor/models/gemma.py
+++ b/vllm/model_executor/models/gemma.py
@@ -43,7 +43,8 @@
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (is_pp_missing_parameter,
- make_empty_intermediate_tensors_factory, make_layers)
+ make_empty_intermediate_tensors_factory, make_layers,
+ maybe_prefix)
logger = init_logger(__name__)
@@ -83,16 +84,23 @@ def __init__(
hidden_act: Optional[str] = None,
hidden_activation: Optional[str] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.gate_up_proj = MergedColumnParallelLinear(
- hidden_size, [intermediate_size] * 2,
+ hidden_size,
+ [intermediate_size] * 2,
bias=False,
- quant_config=quant_config)
- self.down_proj = RowParallelLinear(intermediate_size,
- hidden_size,
- bias=False,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.gate_up_proj",
+ )
+ self.down_proj = RowParallelLinear(
+ intermediate_size,
+ hidden_size,
+ bias=False,
+ quant_config=quant_config,
+ prefix=f"{prefix}.down_proj",
+ )
self.act_fn = _get_gemma_act_fn(hidden_act, hidden_activation)
def forward(self, x):
@@ -104,15 +112,18 @@ def forward(self, x):
class GemmaAttention(nn.Module):
- def __init__(self,
- hidden_size: int,
- num_heads: int,
- num_kv_heads: int,
- head_dim: int,
- max_position_embeddings: int = 8192,
- rope_theta: float = 10000,
- cache_config: Optional[CacheConfig] = None,
- quant_config: Optional[QuantizationConfig] = None) -> None:
+ def __init__(
+ self,
+ hidden_size: int,
+ num_heads: int,
+ num_kv_heads: int,
+ head_dim: int,
+ max_position_embeddings: int = 8192,
+ rope_theta: float = 10000,
+ cache_config: Optional[CacheConfig] = None,
+ quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
+ ) -> None:
super().__init__()
self.hidden_size = hidden_size
tp_size = get_tensor_model_parallel_world_size()
@@ -142,12 +153,14 @@ def __init__(self,
self.total_num_kv_heads,
bias=False,
quant_config=quant_config,
+ prefix=f"{prefix}.qkv_proj",
)
self.o_proj = RowParallelLinear(
self.total_num_heads * self.head_dim,
hidden_size,
bias=False,
quant_config=quant_config,
+ prefix=f"{prefix}.o_proj",
)
self.rotary_emb = get_rope(
@@ -186,6 +199,7 @@ def __init__(
config: GemmaConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = config.hidden_size
@@ -198,6 +212,7 @@ def __init__(
rope_theta=config.rope_theta,
cache_config=cache_config,
quant_config=quant_config,
+ prefix=f"{prefix}.self_attn",
)
self.mlp = GemmaMLP(
hidden_size=self.hidden_size,
@@ -205,6 +220,7 @@ def __init__(
hidden_act=config.hidden_act,
hidden_activation=getattr(config, "hidden_activation", None),
quant_config=quant_config,
+ prefix=f"{prefix}.mlp",
)
self.input_layernorm = GemmaRMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
@@ -259,8 +275,8 @@ def __init__(
)
self.start_layer, self.end_layer, self.layers = make_layers(
config.num_hidden_layers,
- lambda prefix: GemmaDecoderLayer(config, cache_config, quant_config
- ),
+ lambda prefix: GemmaDecoderLayer(
+ config, cache_config, quant_config, prefix=prefix),
prefix=f"{prefix}.layers")
self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@@ -366,6 +382,7 @@ def __init__(
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
lora_config: Optional[LoRAConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
@@ -375,7 +392,10 @@ def __init__(
self.lora_config = lora_config
self.quant_config = quant_config
- self.model = GemmaModel(config, cache_config, quant_config)
+ self.model = GemmaModel(config,
+ cache_config,
+ quant_config,
+ prefix=maybe_prefix(prefix, "model"))
self.logits_processor = LogitsProcessor(config.vocab_size)
self.sampler = Sampler()
self.make_empty_intermediate_tensors = (
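
The utils.py hunk that adds maybe_prefix is not reproduced above, but the call sites (maybe_prefix(prefix, "model"), maybe_prefix(prefix, "lm_head"), maybe_prefix(prefix, "output")) pin down its behavior: join a parent prefix and a child name with a dot, and return the child name unchanged when the model is built at the top level with an empty prefix. A sketch consistent with those call sites (an assumption about the helper, not the actual hunk):

    def maybe_prefix(prefix: str, name: str) -> str:
        # Prepend the parent prefix when there is one; otherwise the module
        # keeps its bare name (top-level model, prefix == "").
        return f"{prefix}.{name}" if prefix else name

    assert maybe_prefix("", "model") == "model"
    assert maybe_prefix("language_model", "model") == "language_model.model"

This keeps layer prefixes such as "language_model.model.layers.0.self_attn.qkv_proj" consistent when a VLM wraps a quantized language model, which is what the per-layer quantization handling needs.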
diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py
index 9a77e48626ca5..313d98b649b48 100644
--- a/vllm/model_executor/models/internlm2.py
+++ b/vllm/model_executor/models/internlm2.py
@@ -30,7 +30,8 @@
from .interfaces import SupportsPP
from .utils import (is_pp_missing_parameter,
- make_empty_intermediate_tensors_factory, make_layers)
+ make_empty_intermediate_tensors_factory, make_layers,
+ maybe_prefix)
class InternLM2MLP(nn.Module):
@@ -41,16 +42,23 @@ def __init__(
intermediate_size: int,
hidden_act: str,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.gate_up_proj = MergedColumnParallelLinear(
- hidden_size, [intermediate_size] * 2,
+ hidden_size,
+ [intermediate_size] * 2,
+ bias=False,
+ quant_config=quant_config,
+ prefix=f"{prefix}.gate_up_proj",
+ )
+ self.w2 = RowParallelLinear(
+ intermediate_size,
+ hidden_size,
bias=False,
- quant_config=quant_config)
- self.w2 = RowParallelLinear(intermediate_size,
- hidden_size,
- bias=False,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.w2",
+ )
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. "
"Only silu is supported for now.")
@@ -75,6 +83,7 @@ def __init__(
max_position_embeddings: int = 8192,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = hidden_size
@@ -108,12 +117,14 @@ def __init__(
self.total_num_kv_heads,
bias=False,
quant_config=quant_config,
+ prefix=f"{prefix}.wqkv",
)
self.wo = RowParallelLinear(
self.total_num_heads * self.head_dim,
hidden_size,
bias=False,
quant_config=quant_config,
+ prefix=f"{prefix}.wo",
)
self.rotary_emb = get_rope(
@@ -123,12 +134,15 @@ def __init__(
base=rope_theta,
rope_scaling=rope_scaling,
)
- self.attn = Attention(self.num_heads,
- self.head_dim,
- self.scaling,
- num_kv_heads=self.num_kv_heads,
- cache_config=cache_config,
- quant_config=quant_config)
+ self.attn = Attention(
+ self.num_heads,
+ self.head_dim,
+ self.scaling,
+ num_kv_heads=self.num_kv_heads,
+ cache_config=cache_config,
+ quant_config=quant_config,
+ prefix=f"{prefix}.attn",
+ )
def split_qkv(self, qkv: torch.Tensor):
seq_len = qkv.shape[0]
@@ -176,6 +190,7 @@ def __init__(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = config.hidden_size
@@ -192,12 +207,14 @@ def __init__(
max_position_embeddings=max_position_embeddings,
cache_config=cache_config,
quant_config=quant_config,
+ prefix=f"{prefix}.attention",
)
self.feed_forward = InternLM2MLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
quant_config=quant_config,
+ prefix=f"{prefix}.feed_forward",
)
self.attention_norm = RMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
@@ -251,8 +268,8 @@ def __init__(
)
self.start_layer, self.end_layer, self.layers = make_layers(
config.num_hidden_layers,
- lambda prefix: InternLMDecoderLayer(config, cache_config,
- quant_config),
+ lambda prefix: InternLMDecoderLayer(
+ config, cache_config, quant_config, prefix=prefix),
prefix=f"{prefix}.layers")
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.make_empty_intermediate_tensors = (
@@ -306,14 +323,19 @@ def __init__(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.config = config
self.quant_config = quant_config
- self.model = InternLM2Model(config, cache_config, quant_config)
+ self.model = InternLM2Model(config,
+ cache_config,
+ quant_config,
+ prefix=maybe_prefix(prefix, "model"))
self.output = ParallelLMHead(config.vocab_size,
config.hidden_size,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=maybe_prefix(prefix, "output"))
if self.config.tie_word_embeddings:
self.output.weight = self.model.tok_embeddings.weight
self.logits_processor = LogitsProcessor(config.vocab_size)
diff --git a/vllm/model_executor/models/internlm2_ve.py b/vllm/model_executor/models/internlm2_ve.py
index 6effd70b75da3..edd867e4b6457 100644
--- a/vllm/model_executor/models/internlm2_ve.py
+++ b/vllm/model_executor/models/internlm2_ve.py
@@ -15,7 +15,7 @@
InternLM2MLP, InternLM2Model)
from vllm.sequence import IntermediateTensors
-from .utils import make_layers
+from .utils import make_layers, maybe_prefix
class InternLM2VEDecoderLayer(nn.Module):
@@ -25,6 +25,7 @@ def __init__(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = config.hidden_size
@@ -41,18 +42,21 @@ def __init__(
max_position_embeddings=max_position_embeddings,
cache_config=cache_config,
quant_config=quant_config,
+ prefix=f"{prefix}.attention",
)
self.feed_forward = InternLM2MLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
quant_config=quant_config,
+ prefix=f"{prefix}.feed_forward",
)
self.feed_forward_ve = InternLM2MLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
quant_config=quant_config,
+ prefix=f"{prefix}.feed_forward_ve",
)
self.attention_norm = RMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
@@ -111,8 +115,8 @@ def __init__(
super().__init__(config, cache_config, quant_config)
self.start_layer, self.end_layer, self.layers = make_layers(
config.num_hidden_layers,
- lambda prefix: InternLM2VEDecoderLayer(config, cache_config,
- quant_config),
+ lambda prefix: InternLM2VEDecoderLayer(
+ config, cache_config, quant_config, prefix=prefix),
prefix=f"{prefix}.layers")
def forward(
@@ -161,6 +165,10 @@ def __init__(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__(config, cache_config, quant_config)
- self.model = InternLM2VEModel(config, cache_config, quant_config)
+ self.model = InternLM2VEModel(config,
+ cache_config,
+ quant_config,
+ prefix=maybe_prefix(prefix, "model"))
diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py
index 3ae37d9fe5d85..1c1fde5b30983 100644
--- a/vllm/model_executor/models/internvl.py
+++ b/vllm/model_executor/models/internvl.py
@@ -439,7 +439,10 @@ def __init__(self,
)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.mlp1 = self._init_mlp1(config)
diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index b0ca1fe006239..98c53bdaae811 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -55,7 +55,8 @@
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter,
- make_empty_intermediate_tensors_factory, make_layers)
+ make_empty_intermediate_tensors_factory, make_layers,
+ maybe_prefix)
class LlamaMLP(nn.Module):
@@ -500,6 +501,7 @@ def __init__(
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
lora_config: Optional[LoRAConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
@@ -510,7 +512,7 @@ def __init__(
cache_config,
quant_config,
lora_config=lora_config,
- prefix="model")
+ prefix=maybe_prefix(prefix, "model"))
if get_pp_group().is_last_rank:
self.unpadded_vocab_size = config.vocab_size
if lora_config:
@@ -526,6 +528,7 @@ def __init__(
if not lora_config else
lora_config.lora_vocab_padding_size),
quant_config=quant_config,
+ prefix=maybe_prefix(prefix, "lm_head"),
)
if config.tie_word_embeddings:
self.lm_head = self.lm_head.tie_weights(
diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py
index b005d83c17f90..eda99c029881f 100644
--- a/vllm/model_executor/models/llava.py
+++ b/vllm/model_executor/models/llava.py
@@ -210,6 +210,7 @@ def init_vision_tower_for_llava(
quant_config: Optional[QuantizationConfig],
*,
require_post_norm: Optional[bool] = None,
+ prefix: str = "",
):
vision_config = hf_config.vision_config
@@ -224,23 +225,26 @@ def init_vision_tower_for_llava(
if isinstance(vision_config, CLIPVisionConfig):
return CLIPVisionModel(
vision_config,
- quant_config,
+ quant_config=quant_config,
num_hidden_layers_override=num_hidden_layers,
require_post_norm=require_post_norm,
+ prefix=prefix,
)
elif isinstance(vision_config, SiglipVisionConfig):
return SiglipVisionModel(
vision_config,
- quant_config,
+ quant_config=quant_config,
num_hidden_layers_override=num_hidden_layers,
require_post_norm=require_post_norm,
+ prefix=prefix,
)
elif isinstance(vision_config, PixtralVisionConfig):
return PixtralHFVisionModel(
vision_config,
- quant_config,
+ quant_config=quant_config,
num_hidden_layers_override=num_hidden_layers,
require_post_norm=require_post_norm,
+ prefix=prefix,
)
msg = f"Unsupported vision config: {type(vision_config)}"
@@ -274,14 +278,20 @@ def __init__(self,
# TODO: Optionally initializes this for supporting embeddings.
self.vision_tower = init_vision_tower_for_llava(
- config, quant_config, require_post_norm=False)
+ config,
+ quant_config,
+ require_post_norm=False,
+ prefix="vision_tower")
self.multi_modal_projector = LlavaMultiModalProjector(
vision_hidden_size=config.vision_config.hidden_size,
text_hidden_size=config.text_config.hidden_size,
projector_hidden_act=config.projector_hidden_act)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.make_empty_intermediate_tensors = (
self.language_model.make_empty_intermediate_tensors)
diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py
index 2a582deeaa2c9..f85129b206919 100644
--- a/vllm/model_executor/models/llava_next.py
+++ b/vllm/model_executor/models/llava_next.py
@@ -293,7 +293,10 @@ def __init__(self,
# TODO: Optionally initializes this for supporting embeddings.
self.vision_tower = init_vision_tower_for_llava(
- config, quant_config, require_post_norm=False)
+ config,
+ quant_config,
+ require_post_norm=False,
+ prefix="vision_tower")
self.image_newline = nn.Parameter(
torch.empty(config.text_config.hidden_size))
self.multi_modal_projector = LlavaMultiModalProjector(
@@ -302,7 +305,10 @@ def __init__(self,
projector_hidden_act=config.projector_hidden_act)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
# The same model class supports both language generation and embedding
# because the architecture name is the same
diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py
index 43eec43d56643..b8051d5fc6ae2 100644
--- a/vllm/model_executor/models/llava_next_video.py
+++ b/vllm/model_executor/models/llava_next_video.py
@@ -257,14 +257,20 @@ def __init__(self,
# Initialize the vision tower only up to the required feature layer
self.vision_tower = init_vision_tower_for_llava(
- config, quant_config, require_post_norm=False)
+ config,
+ quant_config,
+ require_post_norm=False,
+ prefix="vision_tower")
self.vision_resampler = LlavaNextVideoPooler(config)
self.multi_modal_projector = LlavaNextMultiModalProjector(
vision_hidden_size=config.vision_config.hidden_size,
text_hidden_size=config.text_config.hidden_size,
projector_hidden_act=config.projector_hidden_act)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.make_empty_intermediate_tensors = (
self.language_model.model.make_empty_intermediate_tensors)
diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py
index 9606b126141df..a0cf208a65f36 100644
--- a/vllm/model_executor/models/llava_onevision.py
+++ b/vllm/model_executor/models/llava_onevision.py
@@ -415,10 +415,16 @@ def __init__(self,
# Initialize the vision tower only up to the required feature layer
self.vision_tower = init_vision_tower_for_llava(
- config, quant_config, require_post_norm=False)
+ config,
+ quant_config,
+ require_post_norm=False,
+ prefix="vision_tower")
self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.image_newline = nn.Parameter(
torch.empty(config.text_config.hidden_size))
diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py
index 2ec51dc4647f5..a270282d87bc8 100644
--- a/vllm/model_executor/models/minicpmv.py
+++ b/vllm/model_executor/models/minicpmv.py
@@ -394,8 +394,11 @@ def __init__(
self.multimodal_config = multimodal_config
self.version = get_version_by_config(self.config)
- self.llm = self.init_llm(config, cache_config, quant_config)
- self.vpm = self.init_vision_module(config, quant_config)
+ self.llm = self.init_llm(config,
+ cache_config,
+ quant_config,
+ prefix="llm")
+ self.vpm = self.init_vision_module(config, quant_config, prefix="vpm")
param_dtype = torch.get_default_dtype()
self.vpm.to(dtype=param_dtype)
self.vision_dim = (self.vpm.embed_dim if self.version == (2, 0) else
@@ -403,9 +406,11 @@ def __init__(
self.embed_dim = self.config.hidden_size
self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
self.resampler.to(device="cuda", dtype=param_dtype)
+ # TODO: why is there _KEYS_TO_MODIFY_MAPPING? lm_head should be in llm
self.lm_head = ParallelLMHead(config.vocab_size,
config.hidden_size,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix="llm.lm_head")
self.logits_processor = LogitsProcessor(config.vocab_size)
self.sampler = Sampler()
@@ -644,6 +649,7 @@ def init_llm(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> nn.Module:
raise NotImplementedError
@@ -651,6 +657,7 @@ def init_vision_module(
self,
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig],
+ prefix: str = "",
) -> nn.Module:
raise NotImplementedError
@@ -690,17 +697,20 @@ def init_llm(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> nn.Module:
return LLMWrapper(MiniCPMModel(config,
cache_config=cache_config,
- quant_config=quant_config),
+ quant_config=quant_config,
+ prefix=prefix),
name="model")
def init_vision_module(
self,
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig],
+ prefix: str = "",
) -> nn.Module:
# TODO :refactor this vision model
try:
@@ -819,19 +829,23 @@ def init_llm(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> nn.Module:
return LLMWrapper(LlamaModel(config,
cache_config=cache_config,
- quant_config=quant_config),
+ quant_config=quant_config,
+ prefix=prefix),
name="model")
def init_vision_module(
self,
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig],
+ prefix: str = "",
) -> nn.Module:
model = Idefics2VisionTransformer(config.vision_config,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=prefix)
if self.config.drop_vision_last_layer:
model.encoder.layers = model.encoder.layers[:-1]
return model
@@ -935,20 +949,24 @@ def init_llm(
config: PretrainedConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> nn.Module:
return LLMWrapper(Qwen2Model(config,
cache_config=cache_config,
- quant_config=quant_config),
+ quant_config=quant_config,
+ prefix=prefix),
name="model")
def init_vision_module(
self,
config: PretrainedConfig,
quant_config: Optional[QuantizationConfig],
+ prefix: str = "",
) -> nn.Module:
model = Idefics2VisionTransformer(config.vision_config,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=prefix)
if self.config.drop_vision_last_layer:
model.encoder.layers = model.encoder.layers[:-1]
return model
diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py
index 37c3fa919124e..10cca8b56268a 100644
--- a/vllm/model_executor/models/opt.py
+++ b/vllm/model_executor/models/opt.py
@@ -43,7 +43,8 @@
from .interfaces import SupportsPP
from .utils import (is_pp_missing_parameter,
- make_empty_intermediate_tensors_factory, make_layers)
+ make_empty_intermediate_tensors_factory, make_layers,
+ maybe_prefix)
class OPTLearnedPositionalEmbedding(nn.Embedding):
@@ -68,6 +69,7 @@ def __init__(
bias: bool = True,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.embed_dim = embed_dim
@@ -85,18 +87,21 @@ def __init__(
total_num_heads,
bias=bias,
quant_config=quant_config,
+ prefix=f"{prefix}.qkv_proj",
)
self.out_proj = RowParallelLinear(
embed_dim,
embed_dim,
bias=bias,
quant_config=quant_config,
+ prefix=f"{prefix}.out_proj",
)
self.attn = Attention(self.num_heads,
self.head_dim,
scale=self.scaling,
cache_config=cache_config,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.attn")
def forward(
self,
@@ -118,6 +123,7 @@ def __init__(
config: OPTConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
):
super().__init__()
self.config = config
@@ -128,6 +134,7 @@ def __init__(
bias=config.enable_bias,
cache_config=cache_config,
quant_config=quant_config,
+ prefix=f"{prefix}.self_attn",
)
self.do_layer_norm_before = config.do_layer_norm_before
@@ -139,6 +146,7 @@ def __init__(
config.ffn_dim,
bias=config.enable_bias,
quant_config=quant_config,
+ prefix=f"{prefix}.fc1",
)
self.activation_fn = get_act_fn(config.activation_function,
quant_config, config.ffn_dim)
@@ -147,6 +155,7 @@ def __init__(
self.embed_dim,
bias=config.enable_bias,
quant_config=quant_config,
+ prefix=f"{prefix}.fc2",
)
self.final_layer_norm = nn.LayerNorm(
self.embed_dim,
@@ -214,7 +223,8 @@ def __init__(
self.project_out = ReplicatedLinear(config.hidden_size,
config.word_embed_proj_dim,
bias=False,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.project_out")
else:
self.project_out = None
@@ -222,7 +232,8 @@ def __init__(
self.project_in = ReplicatedLinear(config.word_embed_proj_dim,
config.hidden_size,
bias=False,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.project_in")
else:
self.project_in = None
@@ -239,7 +250,8 @@ def __init__(
self.start_layer, self.end_layer, self.layers = make_layers(
config.num_hidden_layers,
- lambda prefix: OPTDecoderLayer(config, cache_config, quant_config),
+ lambda prefix: OPTDecoderLayer(
+ config, cache_config, quant_config, prefix=prefix),
prefix=f"{prefix}.layers")
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
@@ -288,9 +300,13 @@ def __init__(
config: OPTConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
):
super().__init__()
- self.decoder = OPTDecoder(config, cache_config, quant_config)
+ self.decoder = OPTDecoder(config,
+ cache_config,
+ quant_config,
+ prefix=f"{prefix}.decoder")
self.make_empty_intermediate_tensors = (
make_empty_intermediate_tensors_factory(["hidden_states"],
config.hidden_size))
@@ -335,11 +351,15 @@ def __init__(
config: OPTConfig,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
):
super().__init__()
self.config = config
self.quant_config = quant_config
- self.model = OPTModel(config, cache_config, quant_config)
+ self.model = OPTModel(config,
+ cache_config,
+ quant_config,
+ prefix=maybe_prefix(prefix, "model"))
if self.config.tie_word_embeddings:
self.lm_head = self.model.decoder.embed_tokens
else:
diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py
index 7a62a098a4525..8e29c6079b994 100644
--- a/vllm/model_executor/models/paligemma.py
+++ b/vllm/model_executor/models/paligemma.py
@@ -143,14 +143,17 @@ def __init__(self,
self.multimodal_config = multimodal_config
self.vision_tower = SiglipVisionModel(config.vision_config,
- quant_config)
+ quant_config,
+ prefix="vision_tower")
self.multi_modal_projector = PaliGemmaMultiModalProjector(
vision_hidden_size=config.vision_config.hidden_size,
projection_dim=config.vision_config.projection_dim)
self.quant_config = quant_config
self.language_model = GemmaForCausalLM(config.text_config,
- cache_config, quant_config)
+ cache_config,
+ quant_config,
+ prefix="language_model")
logit_scale = getattr(config, "logit_scale", 1.0)
self.language_model.logits_processor.scale *= logit_scale
diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py
index 855a9b17585a4..0962d3d3847c9 100644
--- a/vllm/model_executor/models/phi3v.py
+++ b/vllm/model_executor/models/phi3v.py
@@ -71,7 +71,8 @@
def _init_img_processor(hf_config: PretrainedConfig,
- quant_config: Optional[QuantizationConfig]):
+ quant_config: Optional[QuantizationConfig],
+ prefix: str = "") -> CLIPVisionModel:
clip_config = CLIP_VIT_LARGE_PATCH14_336_CONFIG
layer_idx = hf_config.img_processor.get('layer_idx', -2)
@@ -86,6 +87,7 @@ def _init_img_processor(hf_config: PretrainedConfig,
clip_config,
quant_config,
num_hidden_layers_override=num_hidden_layers,
+ prefix=prefix,
)
return img_processor
@@ -152,15 +154,18 @@ def get_img_features(self,
class Phi3HDImageEmbedding(Phi3ImageEmbeddingBase):
"""Phi3 Image embedding with HD transform."""
- def __init__(self, config: PretrainedConfig,
- quant_config: Optional[QuantizationConfig]) -> None:
+ def __init__(self,
+ config: PretrainedConfig,
+ quant_config: Optional[QuantizationConfig],
+ prefix: str = "") -> None:
super().__init__()
# n_embed or hidden_size
hidden_size = config.n_embd if hasattr(
config, 'n_embd') else config.hidden_size
- self.img_processor = _init_img_processor(config, quant_config)
+ self.img_processor = _init_img_processor(
+ config, quant_config, prefix=f"{prefix}.img_processor")
image_dim_out = config.img_processor['image_dim_out']
self.num_img_tokens = config.img_processor['num_img_tokens']
@@ -537,11 +542,15 @@ def __init__(self,
config.hidden_size,
org_num_embeddings=config.vocab_size,
quant_config=quant_config,
+ prefix="model.embed_tokens",
)
# TODO: Optionally initializes this for supporting input embeddings.
- self.vision_embed_tokens = Phi3HDImageEmbedding(config, quant_config)
+ self.vision_embed_tokens = Phi3HDImageEmbedding(
+ config, quant_config, prefix="model.vision_embed_tokens")
+ # The prefix is intentionally empty because the default prefix of
+ # LlamaForCausalLM is "model"
self.language_model = LlamaForCausalLM(config, cache_config,
quant_config)
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py
index a9dbb3823743a..6b53bf5660096 100644
--- a/vllm/model_executor/models/pixtral.py
+++ b/vllm/model_executor/models/pixtral.py
@@ -164,7 +164,10 @@ def __init__(self,
# init MistralForCausalLM
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
self.vision_encoder = VisionTransformer(self.vision_args)
self.vision_language_adapter = VisionLanguageAdapter(
diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py
index 23eb1482ffef1..db1029345a8ac 100644
--- a/vllm/model_executor/models/qwen2.py
+++ b/vllm/model_executor/models/qwen2.py
@@ -49,7 +49,8 @@
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter,
- make_empty_intermediate_tensors_factory, make_layers)
+ make_empty_intermediate_tensors_factory, make_layers,
+ maybe_prefix)
class Qwen2MLP(nn.Module):
@@ -60,16 +61,23 @@ def __init__(
intermediate_size: int,
hidden_act: str,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.gate_up_proj = MergedColumnParallelLinear(
- hidden_size, [intermediate_size] * 2,
+ hidden_size,
+ [intermediate_size] * 2,
+ bias=False,
+ quant_config=quant_config,
+ prefix=f"{prefix}.gate_up_proj",
+ )
+ self.down_proj = RowParallelLinear(
+ intermediate_size,
+ hidden_size,
bias=False,
- quant_config=quant_config)
- self.down_proj = RowParallelLinear(intermediate_size,
- hidden_size,
- bias=False,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.down_proj",
+ )
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. "
"Only silu is supported for now.")
@@ -92,7 +100,8 @@ def __init__(self,
rope_theta: float = 10000,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
- rope_scaling: Optional[Tuple] = None) -> None:
+ rope_scaling: Optional[Tuple] = None,
+ prefix: str = "") -> None:
super().__init__()
self.hidden_size = hidden_size
tp_size = get_tensor_model_parallel_world_size()
@@ -122,12 +131,14 @@ def __init__(self,
self.total_num_kv_heads,
bias=True,
quant_config=quant_config,
+ prefix=f"{prefix}.qkv_proj",
)
self.o_proj = RowParallelLinear(
self.total_num_heads * self.head_dim,
hidden_size,
bias=False,
quant_config=quant_config,
+ prefix=f"{prefix}.o_proj",
)
self.rotary_emb = get_rope(
@@ -142,7 +153,8 @@ def __init__(self,
self.scaling,
num_kv_heads=self.num_kv_heads,
cache_config=cache_config,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=f"{prefix}.attn")
def forward(
self,
@@ -166,6 +178,7 @@ def __init__(
config: Qwen2Config,
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
+ prefix: str = "",
) -> None:
super().__init__()
self.hidden_size = config.hidden_size
@@ -180,12 +193,15 @@ def __init__(
rope_theta=rope_theta,
cache_config=cache_config,
quant_config=quant_config,
- rope_scaling=rope_scaling)
+ rope_scaling=rope_scaling,
+ prefix=f"{prefix}.self_attn",
+ )
self.mlp = Qwen2MLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
quant_config=quant_config,
+ prefix=f"{prefix}.mlp",
)
self.input_layernorm = RMSNorm(config.hidden_size,
eps=config.rms_norm_eps)
@@ -241,6 +257,7 @@ def __init__(
config.vocab_size,
config.hidden_size,
quant_config=quant_config,
+ prefix=f"{prefix}.embed_tokens",
)
else:
self.embed_tokens = PPMissingLayer()
@@ -249,7 +266,8 @@ def __init__(
config.num_hidden_layers,
lambda prefix: Qwen2DecoderLayer(config=config,
cache_config=cache_config,
- quant_config=quant_config),
+ quant_config=quant_config,
+ prefix=f"{prefix}.layers"),
prefix=f"{prefix}.layers",
)
@@ -393,6 +411,7 @@ def __init__(
cache_config: Optional[CacheConfig] = None,
quant_config: Optional[QuantizationConfig] = None,
lora_config: Optional[LoRAConfig] = None,
+ prefix: str = "",
) -> None:
# TODO (@robertgshaw2): see if this can be moved out
if (cache_config.sliding_window is not None
@@ -412,14 +431,19 @@ def __init__(
self.lora_config = lora_config
self.quant_config = quant_config
- self.model = Qwen2Model(config, cache_config, quant_config)
+ self.model = Qwen2Model(config,
+ cache_config,
+ quant_config,
+ prefix=maybe_prefix(prefix, "model"))
if config.tie_word_embeddings:
self.lm_head = self.model.embed_tokens
else:
self.lm_head = ParallelLMHead(config.vocab_size,
config.hidden_size,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix=maybe_prefix(
+ prefix, "lm_head"))
self.logits_processor = LogitsProcessor(config.vocab_size)
self.sampler = Sampler()
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 4e60fe70b25f1..633d66b4af31a 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -938,7 +938,10 @@ def __init__(self,
quant_config=None,
)
- self.model = Qwen2Model(config, cache_config, quant_config)
+ self.model = Qwen2Model(config,
+ cache_config,
+ quant_config,
+ prefix="model")
if get_pp_group().is_last_rank:
if config.tie_word_embeddings:
@@ -946,7 +949,8 @@ def __init__(self,
else:
self.lm_head = ParallelLMHead(config.vocab_size,
config.hidden_size,
- quant_config=quant_config)
+ quant_config=quant_config,
+ prefix="lm_head")
else:
self.lm_head = PPMissingLayer()
diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py
index 5f33b872beecb..f08e4aa355086 100644
--- a/vllm/model_executor/models/ultravox.py
+++ b/vllm/model_executor/models/ultravox.py
@@ -357,7 +357,10 @@ def __init__(self,
))
self.multi_modal_projector = UltravoxProjector(config)
self.language_model = init_vllm_registered_model(
- config.text_config, cache_config, quant_config)
+ config.text_config,
+ cache_config,
+ quant_config,
+ prefix="language_model")
if config.text_model_id is not None:
self.secondary_weights.append(
DefaultModelLoader.Source(model_or_path=config.text_model_id,
diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py
index 6995f5805c5e1..0aecb5d151a45 100644
--- a/vllm/model_executor/models/utils.py
+++ b/vllm/model_executor/models/utils.py
@@ -242,6 +242,7 @@ def init_vllm_registered_model(
lora_config: Optional[LoRAConfig] = None,
multimodal_config: Optional[MultiModalConfig] = None,
scheduler_config: Optional[SchedulerConfig] = None,
+ prefix: str = "",
) -> nn.Module:
"""
Helper function to initialize an inner model registered to vLLM,
@@ -257,6 +258,7 @@ def init_vllm_registered_model(
lora_config=lora_config,
multimodal_config=multimodal_config,
scheduler_config=scheduler_config,
+ prefix=prefix,
)
@@ -610,3 +612,16 @@ def get_vit_attn_backend() -> _Backend:
else:
selected_backend = _Backend.XFORMERS
return selected_backend
+
+
+def maybe_prefix(prefix: str, name: str) -> str:
+ """Add a prefix to a name if the prefix is non-empty.
+
+ Args:
+ prefix: The prefix to add. If empty, no prefix will be added.
+ name: The name to potentially prefix.
+
+ Returns:
+ The string "prefix.name" if prefix was non-empty, otherwise just "name".
+ """
+ return name if not prefix else f"{prefix}.{name}"
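For reference, a quick usage sketch of `maybe_prefix` as defined in the hunk above; the empty-prefix case is what lets a top-level `*ForCausalLM` pass `prefix=""` and still produce clean weight names.

```python
# Behavior of maybe_prefix exactly as defined above.
def maybe_prefix(prefix: str, name: str) -> str:
    return name if not prefix else f"{prefix}.{name}"


assert maybe_prefix("", "model") == "model"          # top-level module
assert maybe_prefix("language_model", "model") == "language_model.model"
assert maybe_prefix("model.layers.0", "attn") == "model.layers.0.attn"
```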
From 1ab6f6b4ad5c4aac6ee72e51b7f6712098f9ccff Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Tue, 29 Oct 2024 17:06:24 -0700
Subject: [PATCH 33/38] [core][distributed] fix custom allreduce in pytorch 2.5
(#9815)
Signed-off-by: youkaichao
---
.../device_communicators/custom_all_reduce.py | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py
index 7de5b05a0b053..c3632aee6d11a 100644
--- a/vllm/distributed/device_communicators/custom_all_reduce.py
+++ b/vllm/distributed/device_communicators/custom_all_reduce.py
@@ -191,8 +191,20 @@ def capture(self):
def _get_ipc_meta(self, inp: torch.Tensor):
data = inp.untyped_storage()._share_cuda_()
+ handle = data[1]
+ # https://github.com/pytorch/pytorch/pull/130890 changes
+ # the binary format of the ipc handle
+ # it starts from pytorch 2.5
+ if len(handle) > 64:
+ assert len(handle) == 66
+ # only support SHAREABLE_HANDLE_VERSION = 1
+ assert int(handle[0]) == 1
+ # only support SHAREABLE_CUDA_MALLOC = 'c'
+ assert handle[1] == ord("c")
+ handle = handle[2:]
+ # TODO: support expandable segment
shard_data = (
- data[1], # ipc handle to base ptr
+ handle, # ipc handle to base ptr
data[3], # offset of base ptr
)
return self._gather_ipc_meta(shard_data)
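Pulled out as a standalone sketch, the check above normalizes the IPC handle across PyTorch versions. It assumes, as the inline comments state, that the post-2.5 shareable handle is a 2-byte header (version byte, then resource-type byte) followed by the raw 64-byte `cudaIpcMemHandle`, and that expandable segments remain unsupported. This is not vLLM's actual helper, just the shape of the logic.

```python
# Hedged sketch of the handle normalization added above.
def normalize_ipc_handle(handle: bytes) -> bytes:
    if len(handle) <= 64:
        # Pre-PyTorch-2.5 format: the handle is already the raw
        # cudaIpcMemHandle bytes.
        return handle
    assert len(handle) == 66, "unexpected shareable handle length"
    assert handle[0] == 1, "only SHAREABLE_HANDLE_VERSION = 1 is supported"
    assert handle[1] == ord("c"), "only SHAREABLE_CUDA_MALLOC is supported"
    # Strip the 2-byte header and keep the raw IPC handle to the base pointer.
    return handle[2:]
```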
From 64cb1cdc3f3a6c0ca976d68b19d454122c720e6d Mon Sep 17 00:00:00 2001
From: Lily Liu
Date: Tue, 29 Oct 2024 17:28:43 -0700
Subject: [PATCH 34/38] Update README.md (#9819)
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 8c8d6eb291cea..b75bfc5c699a7 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Easy, fast, and cheap LLM serving for everyone
---
-**vLLM x Snowfkale Meetup (Wednesday, November 13th, 5:30-8PM PT) at Snowfkale HQ, San Mateo**
+**vLLM x Snowflake Meetup (Wednesday, November 13th, 5:30-8PM PT) at Snowflake HQ, San Mateo**
We are excited to announce the last in-person vLLM meetup of the year!
Join the vLLM developers and engineers from Snowflake AI Research to chat about the latest LLM inference optimizations and your 2025 vLLM wishlist!
From 226688bd6114749633132b9ed074c59d50904830 Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Tue, 29 Oct 2024 22:49:44 -0400
Subject: [PATCH 35/38] [Bugfix][VLM] Make apply_fp8_linear work with >2D input
(#9812)
---
.../layers/quantization/utils/w8a8_utils.py | 33 +++++++++++--------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
index 1879d2855d93d..445117ac99a34 100644
--- a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
+++ b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
@@ -96,21 +96,26 @@ def apply_fp8_linear(
# If dynamic, layer.input_scale is None and x_scale computed from x.
# If static, layer.input_scale is scalar and x_scale is input_scale.
+ # View input as 2D matrix for fp8 methods
+ input_2d = input.view(-1, input.shape[-1])
+ output_shape = [*input.shape[:-1], weight.shape[1]]
+
# cutlass_scaled_mm supports per tensor/channel W and per tensor/token A
if cutlass_fp8_supported:
qinput, x_scale = ops.scaled_fp8_quant(
- input,
+ input_2d,
input_scale,
scale_ub=input_scale_ub,
use_per_token_if_dynamic=use_per_token_if_dynamic)
# Fused GEMM_DQ
- return ops.cutlass_scaled_mm(qinput,
- weight,
- out_dtype=input.dtype,
- scale_a=x_scale,
- scale_b=weight_scale,
- bias=bias)
+ output = ops.cutlass_scaled_mm(qinput,
+ weight,
+ out_dtype=input.dtype,
+ scale_a=x_scale,
+ scale_b=weight_scale,
+ bias=bias)
+ return output.view(*output_shape)
# torch.scaled_mm supports per tensor weights + activations only
# so fallback to naive if per channel or per token
@@ -119,7 +124,7 @@ def apply_fp8_linear(
# for matrices with batch dimension > 16.
# This could change in the future.
qinput, x_scale = ops.scaled_fp8_quant(
- input,
+ input_2d,
input_scale,
num_token_padding=17,
use_per_token_if_dynamic=use_per_token_if_dynamic)
@@ -138,8 +143,10 @@ def apply_fp8_linear(
# A fix for discrepancy in scaled_mm which returns tuple
# for torch < 2.5 and a single value in torch >= 2.5
if type(output) is tuple and len(output) == 2:
- return torch.narrow(output[0], 0, 0, input.shape[0])
- return torch.narrow(output, 0, 0, input.shape[0])
+ output = output[0]
+
+ return torch.narrow(output, 0, 0,
+ input_2d.shape[0]).view(*output_shape)
else:
# Fallback for channelwise case, where we use unfused DQ
@@ -176,15 +183,15 @@ def apply_fp8_linear(
if type(output) is tuple and len(output) == 2:
output = output[0]
# Unpad (undo num_token_padding)
- output = torch.narrow(output, 0, 0, input.shape[0])
- x_scale = torch.narrow(x_scale, 0, 0, input.shape[0])
+ output = torch.narrow(output, 0, 0, input_2d.shape[0])
+ x_scale = torch.narrow(x_scale, 0, 0, input_2d.shape[0])
# DQ
# C = sw * sx * (X * W) + bias
output = output * x_scale * weight_scale.t()
if bias is not None:
output = output + bias
- return output.to(dtype=input.dtype)
+ return output.to(dtype=input.dtype).view(*output_shape)
def apply_int8_linear(
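The fix is the standard flatten-to-2D / restore-shape pattern: collapse all leading batch dimensions before the quantized GEMM, then view the result back. Below is a minimal shape-only sketch with a plain matmul standing in for the fp8 path; the function name and shapes are illustrative, not vLLM code.

```python
import torch


def matmul_nd(input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # Collapse every leading dim into one "token" dim: [..., K] -> [M, K].
    input_2d = input.view(-1, input.shape[-1])
    # Remember the shape to restore afterwards: [..., N].
    output_shape = [*input.shape[:-1], weight.shape[1]]
    output = input_2d @ weight  # stand-in for the quantized GEMM
    return output.view(*output_shape)


x = torch.randn(2, 3, 8)   # e.g. [batch, seq, hidden] from a VLM projector
w = torch.randn(8, 16)
assert matmul_nd(x, w).shape == (2, 3, 16)
```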
From 62fac4b9aab3c05124d83fcd71db5732774b17d8 Mon Sep 17 00:00:00 2001
From: "Kevin H. Luu"
Date: Tue, 29 Oct 2024 17:34:55 -1000
Subject: [PATCH 36/38] [ci/build] Pin CI dependencies version with pip-compile
(#9810)
Signed-off-by: kevin
---
Dockerfile.rocm | 2 +
requirements-build.txt | 18 +-
requirements-test.in | 37 +++
requirements-test.txt | 593 ++++++++++++++++++++++++++++++++++++++---
4 files changed, 608 insertions(+), 42 deletions(-)
create mode 100644 requirements-test.in
diff --git a/Dockerfile.rocm b/Dockerfile.rocm
index d35889f053e27..562117a313020 100644
--- a/Dockerfile.rocm
+++ b/Dockerfile.rocm
@@ -121,6 +121,8 @@ ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi
+RUN python3 -m pip install --upgrade pip
+
# Package upgrades for useful functionality or to avoid dependency issues
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m pip install --upgrade numba scipy huggingface-hub[cli] pytest-shard
diff --git a/requirements-build.txt b/requirements-build.txt
index ea2b688bb3108..7b16d9778c1a6 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -1,9 +1,9 @@
-# Should be mirrored in pyproject.toml
-cmake>=3.26
-ninja
-packaging
-setuptools>=61
-setuptools-scm>=8
-torch==2.5.0
-wheel
-jinja2
+# Should be mirrored in pyproject.toml
+cmake>=3.26
+ninja
+packaging
+setuptools>=61
+setuptools-scm>=8
+torch==2.5.0
+wheel
+jinja2
diff --git a/requirements-test.in b/requirements-test.in
new file mode 100644
index 0000000000000..3881f2566b556
--- /dev/null
+++ b/requirements-test.in
@@ -0,0 +1,37 @@
+# testing
+pytest
+tensorizer>=2.9.0
+pytest-forked
+pytest-asyncio
+pytest-rerunfailures
+pytest-shard
+
+# testing utils
+awscli
+einops # required for MPT, qwen-vl and Mamba
+httpx
+librosa # required for audio tests
+opencv-python # required for video tests
+peft
+requests
+ray[adag]==2.35
+sentence-transformers # required for embedding
+soundfile # required for audio test
+timm # required for internvl test
+torch==2.5.0
+transformers_stream_generator # required for qwen-vl test
+matplotlib # required for qwen-vl test
+datamodel_code_generator # required for minicpm3 test
+lm-eval[api]==0.4.4 # required for model evaluation test
+
+# TODO: Add this after fully implementing llava(mantis)
+# git+https://github.com/TIGER-AI-Lab/Mantis.git # required for llava(mantis) test
+
+# Benchmarking
+aiohttp
+
+# quantization
+bitsandbytes>=0.44.0
+buildkite-test-collector==0.1.8
+
+numpy < 2.0.0
diff --git a/requirements-test.txt b/requirements-test.txt
index 9787fa2a4a486..c474c2ec34b22 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1,34 +1,561 @@
-# testing
-pytest
-tensorizer>=2.9.0
-pytest-forked
-pytest-asyncio
-pytest-rerunfailures
-pytest-shard
-
-# testing utils
-awscli
-einops # required for MPT, qwen-vl and Mamba
-httpx
-librosa # required for audio tests
-opencv-python # required for video tests
-peft
-requests
-ray[adag]==2.35
-sentence-transformers # required for embedding
-soundfile # required for audio test
-timm # required for internvl test
-transformers_stream_generator # required for qwen-vl test
-matplotlib # required for qwen-vl test
-datamodel_code_generator # required for minicpm3 test
-lm-eval[api]==0.4.4 # required for model evaluation test
-
-# TODO: Add this after fully implementing llava(mantis)
-# git+https://github.com/TIGER-AI-Lab/Mantis.git # required for llava(mantis) test
-
-# Benchmarking
-aiohttp
-
-# quantization
-bitsandbytes>=0.44.0
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+# pip-compile --output-file=requirements-test.txt requirements-test.in
+#
+absl-py==2.1.0
+ # via rouge-score
+accelerate==1.0.1
+ # via
+ # lm-eval
+ # peft
+aiohappyeyeballs==2.4.3
+ # via aiohttp
+aiohttp==3.10.10
+ # via
+ # -r requirements-test.in
+ # datasets
+ # fsspec
+ # lm-eval
+aiosignal==1.3.1
+ # via
+ # aiohttp
+ # ray
+annotated-types==0.7.0
+ # via pydantic
+anyio==4.6.2.post1
+ # via httpx
+argcomplete==3.5.1
+ # via datamodel-code-generator
+attrs==24.2.0
+ # via
+ # aiohttp
+ # jsonlines
+ # jsonschema
+ # referencing
+audioread==3.0.1
+ # via librosa
+awscli==1.35.16
+ # via -r requirements-test.in
+bitsandbytes==0.44.1
+ # via -r requirements-test.in
+black==24.10.0
+ # via datamodel-code-generator
+boto3==1.35.50
+ # via tensorizer
+botocore==1.35.50
+ # via
+ # awscli
+ # boto3
+ # s3transfer
buildkite-test-collector==0.1.8
+ # via -r requirements-test.in
+certifi==2024.8.30
+ # via
+ # httpcore
+ # httpx
+ # requests
+cffi==1.17.1
+ # via soundfile
+chardet==5.2.0
+ # via mbstrdecoder
+charset-normalizer==3.4.0
+ # via requests
+click==8.1.7
+ # via
+ # black
+ # nltk
+ # ray
+colorama==0.4.6
+ # via
+ # awscli
+ # sacrebleu
+ # tqdm-multiprocess
+contourpy==1.3.0
+ # via matplotlib
+cupy-cuda12x==13.3.0
+ # via ray
+cycler==0.12.1
+ # via matplotlib
+datamodel-code-generator==0.26.2
+ # via -r requirements-test.in
+dataproperty==1.0.1
+ # via
+ # pytablewriter
+ # tabledata
+datasets==3.0.2
+ # via
+ # evaluate
+ # lm-eval
+decorator==5.1.1
+ # via librosa
+dill==0.3.8
+ # via
+ # datasets
+ # evaluate
+ # lm-eval
+ # multiprocess
+dnspython==2.7.0
+ # via email-validator
+docutils==0.16
+ # via awscli
+einops==0.8.0
+ # via -r requirements-test.in
+email-validator==2.2.0
+ # via pydantic
+evaluate==0.4.3
+ # via lm-eval
+fastrlock==0.8.2
+ # via cupy-cuda12x
+filelock==3.16.1
+ # via
+ # datasets
+ # huggingface-hub
+ # ray
+ # torch
+ # transformers
+ # triton
+fonttools==4.54.1
+ # via matplotlib
+frozenlist==1.5.0
+ # via
+ # aiohttp
+ # aiosignal
+ # ray
+fsspec[http]==2024.9.0
+ # via
+ # datasets
+ # evaluate
+ # huggingface-hub
+ # torch
+genson==1.3.0
+ # via datamodel-code-generator
+h11==0.14.0
+ # via httpcore
+hiredis==3.0.0
+ # via tensorizer
+httpcore==1.0.6
+ # via httpx
+httpx==0.27.2
+ # via -r requirements-test.in
+huggingface-hub==0.26.2
+ # via
+ # accelerate
+ # datasets
+ # evaluate
+ # peft
+ # sentence-transformers
+ # timm
+ # tokenizers
+ # transformers
+idna==3.10
+ # via
+ # anyio
+ # email-validator
+ # httpx
+ # requests
+ # yarl
+inflect==5.6.2
+ # via datamodel-code-generator
+iniconfig==2.0.0
+ # via pytest
+isort==5.13.2
+ # via datamodel-code-generator
+jinja2==3.1.4
+ # via
+ # datamodel-code-generator
+ # torch
+jmespath==1.0.1
+ # via
+ # boto3
+ # botocore
+joblib==1.4.2
+ # via
+ # librosa
+ # nltk
+ # scikit-learn
+jsonlines==4.0.0
+ # via lm-eval
+jsonschema==4.23.0
+ # via ray
+jsonschema-specifications==2024.10.1
+ # via jsonschema
+kiwisolver==1.4.7
+ # via matplotlib
+lazy-loader==0.4
+ # via librosa
+libnacl==2.1.0
+ # via tensorizer
+librosa==0.10.2.post1
+ # via -r requirements-test.in
+llvmlite==0.43.0
+ # via numba
+lm-eval[api]==0.4.4
+ # via -r requirements-test.in
+lxml==5.3.0
+ # via sacrebleu
+markupsafe==3.0.2
+ # via jinja2
+matplotlib==3.9.2
+ # via -r requirements-test.in
+mbstrdecoder==1.1.3
+ # via
+ # dataproperty
+ # pytablewriter
+ # typepy
+more-itertools==10.5.0
+ # via lm-eval
+mpmath==1.3.0
+ # via sympy
+msgpack==1.1.0
+ # via
+ # librosa
+ # ray
+multidict==6.1.0
+ # via
+ # aiohttp
+ # yarl
+multiprocess==0.70.16
+ # via
+ # datasets
+ # evaluate
+mypy-extensions==1.0.0
+ # via black
+networkx==3.2.1
+ # via torch
+nltk==3.9.1
+ # via rouge-score
+numba==0.60.0
+ # via librosa
+numexpr==2.10.1
+ # via lm-eval
+numpy==1.26.4
+ # via
+ # -r requirements-test.in
+ # accelerate
+ # bitsandbytes
+ # contourpy
+ # cupy-cuda12x
+ # datasets
+ # evaluate
+ # librosa
+ # matplotlib
+ # numba
+ # numexpr
+ # opencv-python
+ # pandas
+ # peft
+ # rouge-score
+ # sacrebleu
+ # scikit-learn
+ # scipy
+ # soxr
+ # tensorizer
+ # torchvision
+ # transformers
+nvidia-cublas-cu12==12.4.5.8
+ # via
+ # nvidia-cudnn-cu12
+ # nvidia-cusolver-cu12
+ # torch
+nvidia-cuda-cupti-cu12==12.4.127
+ # via torch
+nvidia-cuda-nvrtc-cu12==12.4.127
+ # via torch
+nvidia-cuda-runtime-cu12==12.4.127
+ # via torch
+nvidia-cudnn-cu12==9.1.0.70
+ # via torch
+nvidia-cufft-cu12==11.2.1.3
+ # via torch
+nvidia-curand-cu12==10.3.5.147
+ # via torch
+nvidia-cusolver-cu12==11.6.1.9
+ # via torch
+nvidia-cusparse-cu12==12.3.1.170
+ # via
+ # nvidia-cusolver-cu12
+ # torch
+nvidia-nccl-cu12==2.21.5
+ # via torch
+nvidia-nvjitlink-cu12==12.4.127
+ # via
+ # nvidia-cusolver-cu12
+ # nvidia-cusparse-cu12
+ # torch
+nvidia-nvtx-cu12==12.4.127
+ # via torch
+opencv-python==4.10.0.84
+ # via -r requirements-test.in
+packaging==24.1
+ # via
+ # accelerate
+ # black
+ # datamodel-code-generator
+ # datasets
+ # evaluate
+ # huggingface-hub
+ # lazy-loader
+ # matplotlib
+ # peft
+ # pooch
+ # pytest
+ # pytest-rerunfailures
+ # ray
+ # transformers
+ # typepy
+pandas==2.2.3
+ # via
+ # datasets
+ # evaluate
+pathspec==0.12.1
+ # via black
+pathvalidate==3.2.1
+ # via pytablewriter
+peft==0.13.2
+ # via
+ # -r requirements-test.in
+ # lm-eval
+pillow==11.0.0
+ # via
+ # matplotlib
+ # sentence-transformers
+ # torchvision
+platformdirs==4.3.6
+ # via
+ # black
+ # pooch
+pluggy==1.5.0
+ # via pytest
+pooch==1.8.2
+ # via librosa
+portalocker==2.10.1
+ # via sacrebleu
+propcache==0.2.0
+ # via yarl
+protobuf==5.28.3
+ # via
+ # ray
+ # tensorizer
+psutil==6.1.0
+ # via
+ # accelerate
+ # peft
+ # tensorizer
+py==1.11.0
+ # via pytest-forked
+pyarrow==18.0.0
+ # via datasets
+pyasn1==0.6.1
+ # via rsa
+pybind11==2.13.6
+ # via lm-eval
+pycparser==2.22
+ # via cffi
+pydantic[email]==2.9.2
+ # via datamodel-code-generator
+pydantic-core==2.23.4
+ # via pydantic
+pyparsing==3.2.0
+ # via matplotlib
+pytablewriter==1.2.0
+ # via lm-eval
+pytest==8.3.3
+ # via
+ # -r requirements-test.in
+ # buildkite-test-collector
+ # pytest-asyncio
+ # pytest-forked
+ # pytest-rerunfailures
+ # pytest-shard
+pytest-asyncio==0.24.0
+ # via -r requirements-test.in
+pytest-forked==1.6.0
+ # via -r requirements-test.in
+pytest-rerunfailures==14.0
+ # via -r requirements-test.in
+pytest-shard==0.1.2
+ # via -r requirements-test.in
+python-dateutil==2.9.0.post0
+ # via
+ # botocore
+ # matplotlib
+ # pandas
+ # typepy
+pytz==2024.2
+ # via
+ # pandas
+ # typepy
+pyyaml==6.0.2
+ # via
+ # accelerate
+ # awscli
+ # datamodel-code-generator
+ # datasets
+ # huggingface-hub
+ # peft
+ # ray
+ # timm
+ # transformers
+ray[adag]==2.35.0
+ # via -r requirements-test.in
+redis==5.2.0
+ # via tensorizer
+referencing==0.35.1
+ # via
+ # jsonschema
+ # jsonschema-specifications
+regex==2024.9.11
+ # via
+ # nltk
+ # sacrebleu
+ # tiktoken
+ # transformers
+requests==2.32.3
+ # via
+ # -r requirements-test.in
+ # buildkite-test-collector
+ # datasets
+ # evaluate
+ # huggingface-hub
+ # lm-eval
+ # pooch
+ # ray
+ # tiktoken
+ # transformers
+rouge-score==0.1.2
+ # via lm-eval
+rpds-py==0.20.0
+ # via
+ # jsonschema
+ # referencing
+rsa==4.7.2
+ # via awscli
+s3transfer==0.10.3
+ # via
+ # awscli
+ # boto3
+sacrebleu==2.4.3
+ # via lm-eval
+safetensors==0.4.5
+ # via
+ # accelerate
+ # peft
+ # timm
+ # transformers
+scikit-learn==1.5.2
+ # via
+ # librosa
+ # lm-eval
+ # sentence-transformers
+scipy==1.13.1
+ # via
+ # librosa
+ # scikit-learn
+ # sentence-transformers
+sentence-transformers==3.2.1
+ # via -r requirements-test.in
+six==1.16.0
+ # via
+ # python-dateutil
+ # rouge-score
+sniffio==1.3.1
+ # via
+ # anyio
+ # httpx
+soundfile==0.12.1
+ # via
+ # -r requirements-test.in
+ # librosa
+soxr==0.5.0.post1
+ # via librosa
+sqlitedict==2.1.0
+ # via lm-eval
+sympy==1.13.1
+ # via torch
+tabledata==1.3.3
+ # via pytablewriter
+tabulate==0.9.0
+ # via sacrebleu
+tcolorpy==0.1.6
+ # via pytablewriter
+tenacity==9.0.0
+ # via lm-eval
+tensorizer==2.9.0
+ # via -r requirements-test.in
+threadpoolctl==3.5.0
+ # via scikit-learn
+tiktoken==0.8.0
+ # via lm-eval
+timm==1.0.11
+ # via -r requirements-test.in
+tokenizers==0.20.1
+ # via transformers
+torch==2.5.0
+ # via
+ # -r requirements-test.in
+ # accelerate
+ # bitsandbytes
+ # lm-eval
+ # peft
+ # sentence-transformers
+ # tensorizer
+ # timm
+ # torchvision
+torchvision==0.20.0
+ # via timm
+tqdm==4.66.6
+ # via
+ # datasets
+ # evaluate
+ # huggingface-hub
+ # lm-eval
+ # nltk
+ # peft
+ # sentence-transformers
+ # tqdm-multiprocess
+ # transformers
+tqdm-multiprocess==0.0.11
+ # via lm-eval
+transformers==4.45.2
+ # via
+ # lm-eval
+ # peft
+ # sentence-transformers
+ # transformers-stream-generator
+transformers-stream-generator==0.0.5
+ # via -r requirements-test.in
+triton==3.1.0
+ # via torch
+typepy[datetime]==1.3.2
+ # via
+ # dataproperty
+ # pytablewriter
+ # tabledata
+typing-extensions==4.12.2
+ # via
+ # huggingface-hub
+ # librosa
+ # pydantic
+ # pydantic-core
+ # torch
+tzdata==2024.2
+ # via pandas
+urllib3==1.26.20
+ # via
+ # botocore
+ # requests
+word2number==1.1
+ # via lm-eval
+xxhash==3.5.0
+ # via
+ # datasets
+ # evaluate
+yarl==1.17.0
+ # via aiohttp
+zstandard==0.23.0
+ # via lm-eval
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
From 04a3ae0acae3d522299ec90b5730f876daa845e6 Mon Sep 17 00:00:00 2001
From: Yan Ma
Date: Wed, 30 Oct 2024 12:34:45 +0800
Subject: [PATCH 37/38] [Bugfix] Fix multi nodes TP+PP for XPU (#8884)
Signed-off-by: YiSheng5
Signed-off-by: yan ma
Co-authored-by: YiSheng5
---
.../getting_started/xpu-installation.rst | 18 +++++++++++++++
requirements-xpu.txt | 2 +-
vllm/distributed/parallel_state.py | 22 +++++++++++++++++++
vllm/executor/xpu_executor.py | 12 +++++++++-
vllm/platforms/__init__.py | 3 +++
vllm/platforms/xpu.py | 4 ++++
vllm/worker/xpu_worker.py | 13 ++++-------
7 files changed, 63 insertions(+), 11 deletions(-)
diff --git a/docs/source/getting_started/xpu-installation.rst b/docs/source/getting_started/xpu-installation.rst
index 151ebb5f1811f..b1868acbc84b0 100644
--- a/docs/source/getting_started/xpu-installation.rst
+++ b/docs/source/getting_started/xpu-installation.rst
@@ -60,3 +60,21 @@ Build from source
- FP16 is the default data type in the current XPU backend. The BF16 data
type will be supported in the future.
+
+Distributed inference and serving
+---------------------------------
+
+The XPU platform supports tensor-parallel inference/serving, and pipeline parallelism is available as a beta feature for online serving. Ray is required as the distributed runtime backend. A reference invocation looks like the following:
+
+.. code-block:: console
+
+ $ python -m vllm.entrypoints.openai.api_server \
+ $ --model=facebook/opt-13b \
+ $ --dtype=bfloat16 \
+ $ --device=xpu \
+ $ --max_model_len=1024 \
+ $ --distributed-executor-backend=ray \
+ $ --pipeline-parallel-size=2 \
+ $ -tp=8
+
+By default, a Ray instance is launched automatically if no existing one is detected in the system, with ``num-gpus`` equal to ``parallel_config.world_size``. We recommend starting a Ray cluster properly before execution, referring to the helper `script `_.
diff --git a/requirements-xpu.txt b/requirements-xpu.txt
index ce83a178c618f..eb76a33dab5c2 100644
--- a/requirements-xpu.txt
+++ b/requirements-xpu.txt
@@ -13,4 +13,4 @@ torch == 2.3.1+cxx11.abi
intel-extension-for-pytorch == 2.3.110+xpu
oneccl_bind_pt == 2.3.100+xpu
-triton-xpu == 3.0.0b2
+triton-xpu == 3.0.0b1
diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py
index ec39856b6f67c..b04bbc478534c 100644
--- a/vllm/distributed/parallel_state.py
+++ b/vllm/distributed/parallel_state.py
@@ -431,6 +431,28 @@ def gather(self,
if dim < 0:
# Convert negative dim to positive.
dim += input_.dim()
+ # For xpu path, gather doesn't work properly together with ray
+ # cluster so we use all_gather instead for now.
+ if current_platform.is_xpu():
+ input_size = input_.size()
+ # Allocate output tensor.
+ output_tensor = torch.empty((world_size, ) + input_size,
+ dtype=input_.dtype,
+ device=input_.device)
+ # All-gather.
+ torch.distributed.all_gather_into_tensor(output_tensor,
+ input_,
+ group=self.device_group)
+ if self.rank_in_group == dst:
+ # Reshape
+ output_tensor = output_tensor.movedim(0, dim)
+ output_tensor = output_tensor.reshape(input_size[:dim] +
+ (world_size *
+ input_size[dim], ) +
+ input_size[dim + 1:])
+ else:
+ output_tensor = None
+ return output_tensor
# Allocate output tensor.
if self.rank_in_group == dst:
gather_list = [torch.empty_like(input_) for _ in range(world_size)]
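The workaround emulates `gather` with `all_gather_into_tensor` followed by a `movedim` and `reshape`, so every rank pays for the full gather but only the destination rank keeps the result. A standalone sketch is shown below, under the assumption of an already-initialized process group; the parameter names mirror the hunk above, but this is not the vLLM method itself.

```python
import torch
import torch.distributed as dist


def gather_via_all_gather(input_: torch.Tensor, dim: int, dst: int,
                          rank_in_group: int, world_size: int,
                          group=None):
    input_size = input_.size()
    # Every rank allocates and fills the full [world_size, *input_size] buffer.
    output = torch.empty((world_size, ) + input_size,
                         dtype=input_.dtype,
                         device=input_.device)
    dist.all_gather_into_tensor(output, input_, group=group)
    if rank_in_group != dst:
        return None
    # Move the world dim next to `dim`, then merge the two so the result
    # matches what torch.distributed.gather would have produced on dst.
    output = output.movedim(0, dim)
    return output.reshape(input_size[:dim] +
                          (world_size * input_size[dim], ) +
                          input_size[dim + 1:])
```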
diff --git a/vllm/executor/xpu_executor.py b/vllm/executor/xpu_executor.py
index bada56068507a..5f78993ddc4b4 100644
--- a/vllm/executor/xpu_executor.py
+++ b/vllm/executor/xpu_executor.py
@@ -44,7 +44,7 @@ def __init__(
self.cache_config = cache_config
self.load_config = load_config
self.lora_config = lora_config
- self.parallel_config = parallel_config
+ self.parallel_config = _verify_and_get_parallel_config(parallel_config)
self.scheduler_config = scheduler_config
self.device_config = device_config
self.prompt_adapter_config = prompt_adapter_config
@@ -94,3 +94,13 @@ def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig:
"mode.")
config.enforce_eager = True
return config
+
+
+def _verify_and_get_parallel_config(config: ParallelConfig) -> ParallelConfig:
+ if (config.distributed_executor_backend is not None
+ and config.distributed_executor_backend != "ray"):
+ logger.warning(
+ "%s is not supported on XPU, fallback to ray distributed executor "
+ "backend.", config.distributed_executor_backend)
+ config.distributed_executor_backend = "ray"
+ return config
diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py
index 7e9f8b1297b80..524150920b854 100644
--- a/vllm/platforms/__init__.py
+++ b/vllm/platforms/__init__.py
@@ -45,6 +45,9 @@
is_xpu = False
try:
+ # IPEX is installed only on machines that have XPUs.
+ import intel_extension_for_pytorch # noqa: F401
+ import oneccl_bindings_for_pytorch # noqa: F401
import torch
if hasattr(torch, 'xpu') and torch.xpu.is_available():
is_xpu = True
diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py
index d00e0dca84fff..106e8eddf458f 100644
--- a/vllm/platforms/xpu.py
+++ b/vllm/platforms/xpu.py
@@ -20,3 +20,7 @@ def get_device_name(device_id: int = 0) -> str:
def get_device_total_memory(cls, device_id: int = 0) -> int:
device_props = torch.xpu.get_device_properties(device_id)
return device_props.total_memory
+
+ @staticmethod
+ def inference_mode():
+ return torch.no_grad()
diff --git a/vllm/worker/xpu_worker.py b/vllm/worker/xpu_worker.py
index 917866f2d985b..c1d836bb0d318 100644
--- a/vllm/worker/xpu_worker.py
+++ b/vllm/worker/xpu_worker.py
@@ -14,7 +14,6 @@
SpeculativeConfig)
from vllm.distributed import (ensure_model_parallel_initialized,
init_distributed_environment)
-from vllm.distributed.parallel_state import get_pp_group
from vllm.logger import init_logger
from vllm.model_executor import set_random_seed
from vllm.platforms import current_platform
@@ -183,11 +182,10 @@ def init_worker_distributed_environment(self) -> None:
# use sockets as default Level zero IPC exchange backend. By
# default oneccl will use `drmfd` as mechanism which need extra
# dependency (libdrm and drm headers) on your system.
- ENV_CCL_ZE_IPC_EXCHANGE = os.getenv("CCL_ZE_IPC_EXCHANGE",
- "sockets")
+ ENV_CCL_ATL_TRANSPORT = os.getenv("CCL_ATL_TRANSPORT", "ofi")
ENV_LOCAL_WORLD_SIZE = os.getenv("LOCAL_WORLD_SIZE",
str(parallel_config.world_size))
- os.environ['CCL_ZE_IPC_EXCHANGE'] = ENV_CCL_ZE_IPC_EXCHANGE
+ os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
os.environ["LOCAL_RANK"] = str(self.local_rank)
init_distributed_environment(
@@ -200,8 +198,5 @@ def init_worker_distributed_environment(self) -> None:
ensure_model_parallel_initialized(
parallel_config.tensor_parallel_size,
parallel_config.pipeline_parallel_size)
-
- if parallel_config.pipeline_parallel_size > 1:
- # torch-ccl xpu need a collective API warm up
- # before calling send/recv API
- get_pp_group().all_reduce(torch.zeros(1).xpu())
+ # global all_reduce needed for overall oneccl warm up
+ torch.distributed.all_reduce(torch.zeros(1).xpu())
From 7b0365efef35bb03aa94e0085199d20750409363 Mon Sep 17 00:00:00 2001
From: Russell Bryant
Date: Wed, 30 Oct 2024 01:22:23 -0400
Subject: [PATCH 38/38] [Doc] Add the DCO to CONTRIBUTING.md (#9803)
Signed-off-by: Russell Bryant
Co-authored-by: Michael Goin
Co-authored-by: Cyrus Leung
---
CONTRIBUTING.md | 12 +++++++++++-
DCO | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+), 1 deletion(-)
create mode 100644 DCO
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5f79356bd32f7..b39fd75b5fb70 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,12 +11,14 @@ We also believe in the power of community support; thus, answering queries, offe
Finally, one of the most impactful ways to support us is by raising awareness about vLLM. Talk about it in your blog posts and highlight how it's driving your incredible projects. Express your support on social media if you're using vLLM, or simply offer your appreciation by starring our repository!
+## License
+
+See [LICENSE](LICENSE).
## Developing
Depending on the kind of development you'd like to do (e.g. Python, CUDA), you can choose to build vLLM with or without compilation. Check out the [building from source](https://docs.vllm.ai/en/latest/getting_started/installation.html#build-from-source) documentation for details.
-
## Testing
```bash
@@ -33,6 +35,14 @@ pytest tests/
## Contribution Guidelines
+### DCO and Signed-off-by
+
+When contributing changes to this project, you must agree to the [DCO](DCO).
+Commits must include a `Signed-off-by:` header which certifies agreement with
+the terms of the [DCO](DCO).
+
+Using `-s` with `git commit` will automatically add this header.
+
### Issues
If you encounter a bug or have a feature request, please [search existing issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue) first to see if it has already been reported. If not, please [file a new issue](https://github.com/vllm-project/vllm/issues/new/choose), providing as much relevant information as possible.
diff --git a/DCO b/DCO
new file mode 100644
index 0000000000000..49b8cb0549267
--- /dev/null
+++ b/DCO
@@ -0,0 +1,34 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.