XLA optimized Implementation of StaticCache with Tensor Indexing API #31129

Closed
wants to merge 14 commits into from
Changes from 2 commits
24 changes: 20 additions & 4 deletions src/transformers/cache_utils.py
@@ -7,8 +7,7 @@
 import torch

 from .configuration_utils import PretrainedConfig
-from .utils import is_hqq_available, is_quanto_available, logging
-
+from .utils import is_hqq_available, is_quanto_available, logging, is_torch_xla_available

 if is_quanto_available():
     from quanto import QBitsTensor, qint2, qint4
@@ -792,8 +791,25 @@ def update(
         k_out = self.key_cache[layer_idx]
         v_out = self.value_cache[layer_idx]

-        k_out[:, :, cache_position] = key_states
-        v_out[:, :, cache_position] = value_states
+        if is_torch_xla_available():  # If torch_xla is available, do an out-of-place update of the KV cache and create new per-layer lists
+            k_out = k_out.index_copy(2, cache_position, key_states)
+            v_out = v_out.index_copy(2, cache_position, value_states)
+
+            updated_key_cache = [
+                k_out if i == layer_idx else self.key_cache[i] for i in range(len(self.key_cache))
+            ]
+
+            updated_value_cache = [
+                v_out if i == layer_idx else self.value_cache[i] for i in range(len(self.value_cache))
+            ]
+
+            self.key_cache = updated_key_cache
+            self.value_cache = updated_value_cache
+
+            return k_out, v_out
+
+        k_out.index_copy_(2, cache_position, key_states)
+        v_out.index_copy_(2, cache_position, value_states)

         return k_out, v_out

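For context, here is a minimal sketch, separate from the PR and using hypothetical toy shapes, of the difference between the two tensor-indexing calls in the diff: index_copy_ (trailing underscore) mutates the preallocated buffer in place, which is what the eager/CUDA path uses, while index_copy returns a new tensor and leaves the original untouched, which is why the XLA path uses it and then rebuilds the per-layer cache lists functionally.

# Minimal sketch (not part of the PR): toy KV-cache tensor of shape
# (batch, num_heads, max_seq_len, head_dim) = (1, 2, 8, 4); all shapes are hypothetical.
import torch

k_cache = torch.zeros(1, 2, 8, 4)
key_states = torch.randn(1, 2, 3, 4)        # 3 new token positions to write
cache_position = torch.tensor([0, 1, 2])    # target indices along the sequence axis (dim 2)

# Eager/CUDA path: index_copy_ mutates the preallocated buffer in place.
k_cache.index_copy_(2, cache_position, key_states)

# XLA path: index_copy (no trailing underscore) is out-of-place; it returns a new
# tensor and leaves the original untouched, keeping the traced computation functional.
k_new = k_cache.index_copy(2, cache_position, key_states)

# Both calls write the same values; only the aliasing behavior differs.
assert torch.equal(k_new, k_cache)

The list rebuild in the diff follows the same idea: instead of assigning into self.key_cache[layer_idx] in place, the updated tensor replaces the old entry in a freshly constructed list.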