[Refactor] Refactor llumlet to better adapt to different backend engines #4

Merged: 2 commits, Jul 26, 2024
13 changes: 3 additions & 10 deletions llumnix/backends/backend_interface.py
@@ -13,9 +13,8 @@

from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Iterable, List, Optional, Union, Tuple
from typing import Any, Iterable, List, Optional, Union

from llumnix.instance_info import InstanceInfo
from llumnix.llumlet.migrating_request import MigratingRequest
from llumnix.server_info import ServerInfo

@@ -63,14 +62,8 @@ def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:
raise NotImplementedError

@abstractmethod
def step(self) -> Tuple[Any, InstanceInfo]:
"""Performs one inference iteration and returns the generated request outputs and instance info.

Returns:
A tuple containing request outputs and instance information after one iteration.
request_outputs: Results of one iteration, which include metadata such as output token indices,
output texts, etc.
instance_info: An `InstanceInfo` object representing the backend engine state after one iteration.
def _start_engine_loop(self) -> None:
"""Start step loop of backend engine.
"""
raise NotImplementedError

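With this change the abstract interface no longer exposes a step() that returns request outputs and instance info; each backend is now expected to drive its own step loop. A minimal sketch of what the revised contract implies for a new backend integration follows (the class and engine names are illustrative, not part of this diff; a real backend would subclass BackendInterface and also implement its remaining abstract methods):

import threading

class ToyBackend:  # illustrative only; a real backend subclasses BackendInterface
    def __init__(self, engine):
        self.engine = engine  # any engine object that exposes step()
        self._thread = threading.Thread(
            target=self._start_engine_loop, daemon=True, name="engine_loop"
        )
        self._thread.start()

    def _start_engine_loop(self) -> None:
        # step() no longer returns anything to the caller; the engine itself is
        # expected to record InstanceInfo and push request outputs to the servers.
        while True:
            self.engine.step()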
149 changes: 95 additions & 54 deletions llumnix/backends/vllm/llm_engine.py
@@ -12,10 +12,13 @@
# limitations under the License.

import time
from typing import List, Optional, Tuple, Dict, Union, Iterable, Any
from typing import List, Optional, Dict, Union, Iterable, Any
from collections import defaultdict
import threading
import ray
# pylint: disable=unused-import
from ray.util.placement_group import PlacementGroup
from ray.util.queue import Queue as RayQueue

from vllm.engine.llm_engine import LLMEngine
from vllm.core.scheduler import ScheduledSequenceGroup
@@ -40,12 +43,21 @@


class LLMEngineLlumnix(LLMEngine):
def __init__(self, instance_id: str, *arg, **kwargs) -> None:
super().__init__(*arg, **kwargs)
self.instance_id = instance_id
self.step_counter = Counter()
self.instance_info = None
self.scaling_down = False
self.request_server_info: Dict[str, ServerInfo] = {}

# pylint: disable=W0221
@classmethod
def from_engine_args(
cls,
engine_args: EngineArgs,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
instance_id: str = None,
placement_group: Optional["PlacementGroup"] = None,
latency_mem: Optional[LatencyMemData] = None
) -> "LLMEngineLlumnix":
@@ -67,6 +79,7 @@ def from_engine_args(
raise ValueError('unimplemented executor backend')
# Create the LLM engine.
engine = cls(
instance_id=instance_id,
**engine_config.to_dict(),
executor_class=executor_class,
log_stats=not engine_args.disable_log_stats,
@@ -98,6 +111,66 @@ def _process_model_outputs(
seq_group_metadata_list = new_seq_group_metadata_list
return super()._process_model_outputs(output, scheduled_seq_groups, ignored_seq_groups, seq_group_metadata_list)

def step(self) -> None:
output_list = super().step()

instance_info: InstanceInfo = self.scheduler.get_record_instance_info()

if self.scaling_down:
instance_info.num_running_request = 1
instance_info.num_available_gpu_block = -self.cache_config.num_gpu_blocks
instance_info.num_available_gpu_block_waiting = -self.cache_config.num_gpu_blocks

instance_info.instance_id = self.instance_id
instance_info.step_id = next(self.step_counter)
instance_info.timestamp = time.time()
instance_info.latency = self.model_executor.last_inference_latency
seq_groups = self.scheduler.running
if seq_groups:
tot_blocks = []
for seq in seq_groups[-1].get_seqs(SequenceStatus.RUNNING):
blocks = self.scheduler.block_manager.get_block_table(seq)
tot_blocks.extend(blocks)
tot_blocks = set(tot_blocks)
instance_info.num_block_last_running_request = len(tot_blocks)

self.free_request_states(instance_info.finished_request_ids)

if len(output_list) > 0:
server_info_list = []
for output in output_list:
server_info_list.append(self.request_server_info[output.request_id])
self._put_request_output_to_server(output_list, server_info_list)
self.instance_info = instance_info

def _put_request_output_to_server(self, request_outputs, server_infos: List[ServerInfo]) -> None:
server_request_outputs = defaultdict(list)
server_queue: Dict[str, RayQueue] = {}
# Reorganize data in order to put request outputs into each server's queue in one batch.
for request_output, server_info in zip(request_outputs, server_infos):
server_id = server_info.server_id
request_output_queue = server_info.request_output_queue
server_request_outputs[server_id].append(request_output)
if server_id not in server_queue:
server_queue[server_id] = request_output_queue
for server_id, req_outputs in server_request_outputs.items():
try:
server_queue[server_id].actor.put_nowait_batch.remote(req_outputs)
except ray.exceptions.RayActorError:
logger.info("Server {} is dead".format(server_id))
request_ids = [req_output.request_id for req_output in req_outputs]
self.abort(request_ids)

def free_request_states(self, request_id: Union[str, Iterable[str]]) -> None:
if isinstance(request_id, str):
request_id = (request_id,)
request_ids = set(request_id)
for req_id in request_ids:
if req_id in self.request_server_info:
del self.request_server_info[req_id]
if req_id in self.scheduler.last_preemption_time_dict:
del self.scheduler.last_preemption_time_dict[req_id]

class BackendVLLM(BackendInterface):
def __init__(
self,
@@ -107,36 +180,42 @@ def __init__(
placement_group: "PlacementGroup"
) -> None:
assert migration_config.migration_backend == "rpc", "Gloo support will be released later."
self.engine: LLMEngineLlumnix = LLMEngineLlumnix.from_engine_args(engine_args=engine_args, placement_group=placement_group)
self.engine: LLMEngineLlumnix = LLMEngineLlumnix.from_engine_args(engine_args=engine_args, instance_id=instance_id,
placement_group=placement_group)
# multi-instance args
self.engine.scheduler = SchedulerLlumnix(self.engine.scheduler_config, self.engine.cache_config, self.engine.lora_config)
self.engine.output_processor.scheduler = self.engine.scheduler
self.instance_id = instance_id
self.step_counter = Counter()
self.scaling_down = False
self.worker_handle_list = self.engine.model_executor.workers.copy()
if len(self.worker_handle_list) + 1 == self.engine.parallel_config.world_size:
self.worker_handle_list.insert(0, ray.get_actor(f"instance_{self.instance_id}", namespace="llumnix"))
self._run_workers("init_migration", num_migration_cache_blocks=migration_config.migration_cache_blocks,\
src_worker_handle_list=self.worker_handle_list,
placement_group=placement_group)
self.request_server_info: Dict[str, ServerInfo] = {}
self._thread = threading.Thread(
target=self._start_engine_loop, args=(), daemon=True, name="engine_loop"
)
self._thread.start()

def _start_engine_loop(self) -> None:
while True:
self.engine.step()

def send_cpu_cache(self, *args, **kwargs):
# driver worker migration interface
return self.engine.model_executor.driver_worker.execute_method("send_cpu_cache", *args, **kwargs)

def stop_shutdown(self) -> None:
self.scaling_down = False
self.engine.scaling_down = False

def shutdown_workers(self):
migrated_requests = []

self.scaling_down = True
while self.has_unfinished_requests() and self.scaling_down:
self.engine.scaling_down = True
while self.has_unfinished_requests() and self.engine.scaling_down:
time.sleep(1)
time.sleep(0.1)
if self.scaling_down:
if self.engine.scaling_down:
self._run_workers(
"shutdown",
)
@@ -146,18 +225,18 @@ def restart_workers(self) -> None:
self._run_workers(
"restart",
)
self.scaling_down = False
self.engine.scaling_down = False

def add_request(self,
request_id: str,
server_info: ServerInfo,
*args,
**kwargs) -> None:
# When the manager is unavailable, the api server might dispatch a request that has already been dispatched.
if request_id in self.request_server_info:
if request_id in self.engine.request_server_info:
return
# Store the server information of each request to put the request outputs back to the corresponding api server correctly.
self.request_server_info[request_id] = server_info
self.engine.request_server_info[request_id] = server_info
self.engine.add_request(request_id, *args, **kwargs)

def commit_dst_request(self, backend_request: SequenceGroup, server_info: ServerInfo) -> None:
@@ -167,7 +246,7 @@ def commit_dst_request(self, backend_request: SequenceGroup, server_info: Server
pre_alloc_blocks = self.engine.scheduler.pre_alloc_cache_dict.pop(backend_request.request_id)
self.engine.scheduler.block_manager.add_block_table(pre_alloc_blocks, seq.seq_id)
self.add_running_request(backend_request)
self.request_server_info[backend_request.request_id] = server_info
self.engine.request_server_info[backend_request.request_id] = server_info

def send_blocks(self, dst_ray_actor: "ray.actor.ActorHandle", src_blocks: List[int], dst_blocks: List[int]) -> None:
ray.get(dst_ray_actor.execute_engine_method.remote("_run_workers",
@@ -176,37 +255,6 @@ def send_blocks(self, dst_ray_actor: "ray.actor.ActorHandle", src_blocks: List[i
src_blocks=src_blocks,
src_worker_handle_list=self.worker_handle_list))

def step(self) -> Tuple[List[RequestOutput], InstanceInfo, List[ServerInfo]]:
output_list = self.engine.step()

instance_info: InstanceInfo = self.engine.scheduler.get_record_instance_info()

if self.scaling_down:
instance_info.num_running_request = 1
instance_info.num_available_gpu_block = -self.cache_config.num_gpu_blocks
instance_info.num_available_gpu_block_waiting = -self.cache_config.num_gpu_blocks

instance_info.instance_id = self.instance_id
instance_info.step_id = next(self.step_counter)
instance_info.timestamp = time.time()
instance_info.latency = self.engine.model_executor.last_inference_latency
seq_groups = self.engine.scheduler.running
if seq_groups:
tot_blocks = []
for seq in seq_groups[-1].get_seqs(SequenceStatus.RUNNING):
blocks = self.engine.scheduler.block_manager.get_block_table(seq)
tot_blocks.extend(blocks)
tot_blocks = set(tot_blocks)
instance_info.num_block_last_running_request = len(tot_blocks)

server_info_list = []
for output in output_list:
server_info_list.append(self.request_server_info[output.request_id])

self.free_request_states(instance_info.finished_request_ids)

return output_list, instance_info, server_info_list

def _run_workers(self, *args, **kwargs):
# pylint: disable=protected-access
return self.engine.model_executor._run_workers(*args, **kwargs)
@@ -221,15 +269,8 @@ def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:
self.free_request_states(request_ids)
return self.engine.abort_request(request_ids)

def free_request_states(self, request_id: Union[str, Iterable[str]]):
if isinstance(request_id, str):
request_id = (request_id,)
request_ids = set(request_id)
for req_id in request_ids:
if req_id in self.request_server_info:
del self.request_server_info[req_id]
if req_id in self.engine.scheduler.last_preemption_time_dict:
del self.engine.scheduler.last_preemption_time_dict[req_id]
def free_request_states(self, request_id: Union[str, Iterable[str]]) -> None:
return self.engine.free_request_states(request_id)

def get_request_incremental_blocks(self, *args, **kwargs) -> List[int]:
return self.engine.scheduler.get_request_incremental_blocks(*args, **kwargs)
@@ -274,7 +315,7 @@ def get_shortest_running_request(self) -> Optional[MigratingRequest]:
return self.engine.scheduler.get_shortest_running_request()

def get_request_server_info(self, request_id: str) -> ServerInfo:
return self.request_server_info[request_id]
return self.engine.request_server_info[request_id]

def get_all_request_ids(self) -> List[str]:
return self.engine.scheduler.get_all_request_ids()
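For reference, the per-server batching that the engine's step() now performs through _put_request_output_to_server can be condensed as below. This is only a sketch mirroring the code above: RequestOutput objects carry request_id, ServerInfo carries server_id and request_output_queue (a Ray queue), and abort_fn stands in for the engine's abort():

from collections import defaultdict

import ray

def route_outputs(request_outputs, server_infos, abort_fn):
    # Group outputs by destination server so each queue receives a single batched put.
    server_request_outputs = defaultdict(list)
    server_queue = {}
    for request_output, server_info in zip(request_outputs, server_infos):
        server_request_outputs[server_info.server_id].append(request_output)
        server_queue.setdefault(server_info.server_id, server_info.request_output_queue)
    for server_id, req_outputs in server_request_outputs.items():
        try:
            # put_nowait_batch is issued on the queue's underlying Ray actor.
            server_queue[server_id].actor.put_nowait_batch.remote(req_outputs)
        except ray.exceptions.RayActorError:
            # The API server behind this queue is dead; abort its requests instead.
            abort_fn([req_output.request_id for req_output in req_outputs])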
38 changes: 2 additions & 36 deletions llumnix/llumlet/llumlet.py
@@ -11,11 +11,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Dict, Union, Iterable
from typing import List, Union, Iterable
import time
from collections import defaultdict
import ray
from ray.util.queue import Queue as RayQueue
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy

@@ -51,7 +49,6 @@ def __init__(self,
self.migration_scheduler = LocalMigrationScheduler(migration_config.migrate_policy,
self.backend_engine)
self.log_requests = True
self.instance_info = None

@classmethod
def from_args(cls,
@@ -94,10 +91,6 @@ def from_args(cls,
node_id=ray.get_runtime_context().get_node_id(),
soft=False,))
llumlet = engine_class.remote(instance_id, backend_type, migration_config, *args, **kwargs)
# circular dependency
# engine_manager = ray.get_actor(MANAGER_ACTOR_NAME, namespace='llumnix')
# retry_manager_ray_call_by_ray_get(engine_manager.scale_up.remote, 'scale_up', instance_id, llumlet)
llumlet.run_engine_loop.remote()
return llumlet

def migrate_out(self, dst_instance_name: str) -> List[str]:
@@ -129,7 +122,7 @@ def migrate_out(self, dst_instance_name: str) -> List[str]:
return migrated_request_list

def get_instance_info(self) -> InstanceInfo:
return self.instance_info
return self.backend_engine.engine.instance_info

def get_actor_name(self) -> str:
return self.actor_name
@@ -159,15 +152,6 @@ def abort(self, request_id: Union[str, Iterable[str]]) -> None:
request_ids = set(request_id)
return self.backend_engine.abort_request(request_ids)

def run_engine_loop(self) -> None:
while True:
request_outputs, instance_info, server_infos = self.backend_engine.step()
self.instance_info = instance_info
if len(request_outputs) == 0:
time.sleep(0.01)
else:
self._put_request_output_to_server(request_outputs, server_infos)

def clear_migration_states(self, is_migrate_in: bool) -> None:
logger.info("instance {} clear_migration_states, is_migrate_in: {}".format(self.instance_id, is_migrate_in))
if is_migrate_in:
@@ -182,24 +166,6 @@ def clear_migration_states(self, is_migrate_in: bool) -> None:
logger.info("clear_migration_states: add request {} back to engine".format(backend_request.request_id))
self.backend_engine.add_running_request(backend_request)

def _put_request_output_to_server(self, request_outputs, server_infos: List[ServerInfo]) -> None:
server_request_outputs = defaultdict(list)
server_queue: Dict[str, RayQueue] = {}
# Reorganize data in order to put request outputs into each server's queue in one batch.
for request_output, server_info in zip(request_outputs, server_infos):
server_id = server_info.server_id
request_output_queue = server_info.request_output_queue
server_request_outputs[server_id].append(request_output)
if server_id not in server_queue:
server_queue[server_id] = request_output_queue
for server_id, req_outputs in server_request_outputs.items():
try:
server_queue[server_id].actor.put_nowait_batch.remote(req_outputs)
except ray.exceptions.RayActorError:
logger.info("Server {} is dead".format(server_id))
request_ids = [req_output.request_id for req_output in req_outputs]
self.abort(request_ids)

def execute_migration_method(self, method, *args, **kwargs):
executor = getattr(self.migration_coordinator, method)
return executor(*args, **kwargs)