From 3b5dfb75464a6dd1ecb0c18ed3e164b7d0f1d164 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 13 Oct 2023 15:23:09 +0200 Subject: [PATCH 01/24] [Attn Mask Converter] refactor attn mask --- .../models/llama/modeling_llama.py | 37 +++++++++++++++---- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index b697387f5f5b..9ed45fa6e007 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -41,6 +41,24 @@ from .configuration_llama import LlamaConfig +class AttentionMask2DTo4D: + def __init__(self, is_causal: bool): + self.is_causal = is_causal + self.cached_2d_tensor = None + self.cached_4d_tensor = None + + def __call__(self, attention_mask_2d: torch.Tensor): + """ + Multiplies the given tensor x by -10,000. + If the cached tensor does not exist or has a different size, a new one is allocated. + """ + if self.cached_2d_tensor is None or (attention_mask_2d != self.cached_2d_tensor).any(): + self.cached_2d_tensor = attention_mask_2d + self.cached_4d_tensor = self._expand_2d_mask(attention_mask_2d, is_causal=is_causal) + + return self.cached_4d_tensor + + if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa @@ -272,6 +290,7 @@ def __init__(self, config: LlamaConfig): self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta + self.mask_converter = mask_converter if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -376,6 +395,8 @@ def forward( f" {attn_weights.size()}" ) + # convert 2d -> 4d. 
Re-use cached mask if available + attention_mask = self.attn_mask_converter(attention_mask) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( @@ -420,12 +441,11 @@ class LlamaFlashAttention2(LlamaAttention): def forward( self, hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # LlamaFlashAttention2 attention does not support output_attentions output_attentions = False @@ -485,7 +505,7 @@ def forward( value_states = value_states.to(torch.float16) attn_output = self._flash_attention_forward( - query_states, key_states, value_states, padding_mask, q_len, dropout=dropout_rate + query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() @@ -538,7 +558,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=True, + causal=self.mask_converter.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -589,13 +609,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_l class LlamaDecoderLayer(nn.Module): - def __init__(self, config: LlamaConfig): + def __init__(self, config: LlamaConfig, mask_converter=None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - LlamaAttention(config=config) + LlamaAttention(config=config, mask_converter=mask_converter) if not getattr(config, "_flash_attn_2_enabled", False) - else LlamaFlashAttention2(config=config) + else LlamaFlashAttention2(config=config, mask_converter=mask_converter) ) self.mlp = LlamaMLP(config) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -784,8 +804,9 @@ def __init__(self, config: LlamaConfig): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size + attn_mask_converter = AttentionMask2DTo4D(is_causal=True) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.layers = nn.ModuleList([LlamaDecoderLayer(config, attn_mask_converter) for _ in range(config.num_hidden_layers)]) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False From 503262801632f625af3197e2fa8e16f838a2262c Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 13 Oct 2023 15:26:57 +0200 Subject: [PATCH 02/24] up --- .../models/llama/modeling_llama.py | 71 ++++++++----------- 1 file changed, 29 insertions(+), 42 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 9ed45fa6e007..157de1c0651d 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -47,17 +47,42 @@ def __init__(self, is_causal: bool): self.cached_2d_tensor = None self.cached_4d_tensor = None - def __call__(self, attention_mask_2d: torch.Tensor): + def __call__(self, attention_mask_2d: torch.Tensor, input_shape, past_key_values_length, 
dtype): """ Multiplies the given tensor x by -10,000. If the cached tensor does not exist or has a different size, a new one is allocated. """ if self.cached_2d_tensor is None or (attention_mask_2d != self.cached_2d_tensor).any(): self.cached_2d_tensor = attention_mask_2d - self.cached_4d_tensor = self._expand_2d_mask(attention_mask_2d, is_causal=is_causal) + self.cached_4d_tensor = self._prepare_decoder_attention_mask(attention_mask_2d, input_shape, past_key_values_length, dtype) return self.cached_4d_tensor + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length, dtype): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + dtype, + device=attention_mask.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, dtype, tgt_len=input_shape[-1]).to( + attention_mask.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + if is_flash_attn_2_available(): from flash_attn import flash_attn_func, flash_attn_varlen_func @@ -280,7 +305,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: class LlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" - def __init__(self, config: LlamaConfig): + def __init__(self, config: LlamaConfig, mask_converter=None): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -629,7 +654,6 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, - padding_mask: Optional[torch.LongTensor] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: @@ -657,7 +681,6 @@ def forward( past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, - padding_mask=padding_mask, ) hidden_states = residual + hidden_states @@ -819,30 +842,6 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embed_tokens = value - # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask - def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - combined_attention_mask = None - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask( - input_shape, - inputs_embeds.dtype, - device=inputs_embeds.device, - past_key_values_length=past_key_values_length, - ) - - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( - inputs_embeds.device - ) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask - ) - - return combined_attention_mask - @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def forward( self, @@ -891,17 +890,6 @@ def forward( if inputs_embeds is 
None: inputs_embeds = self.embed_tokens(input_ids) # embed positions - if attention_mask is None: - attention_mask = torch.ones( - (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device - ) - padding_mask = None - else: - if 0 in attention_mask: - padding_mask = attention_mask - else: - padding_mask = None - attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) @@ -931,7 +919,7 @@ def forward( def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value - return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask) + return module(*inputs, past_key_value, output_attentions) return custom_forward @@ -946,7 +934,6 @@ def custom_forward(*inputs): past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, - padding_mask=padding_mask, ) hidden_states = layer_outputs[0] From 0bfbc1adc4ff7fb2f1ab105d40976413745829bb Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 10:42:56 +0200 Subject: [PATCH 03/24] Apply suggestions from code review Co-authored-by: fxmarty <9808326+fxmarty@users.noreply.github.com> --- src/transformers/models/llama/modeling_llama.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 157de1c0651d..2051dc47b16c 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -49,8 +49,7 @@ def __init__(self, is_causal: bool): def __call__(self, attention_mask_2d: torch.Tensor, input_shape, past_key_values_length, dtype): """ - Multiplies the given tensor x by -10,000. - If the cached tensor does not exist or has a different size, a new one is allocated. + Prepares and caches an extended 4D float mask of shape (batch_size, 1, tgt_seq_len, src_seq_len), and caches as well attention_mask_2d. """ if self.cached_2d_tensor is None or (attention_mask_2d != self.cached_2d_tensor).any(): self.cached_2d_tensor = attention_mask_2d From d173ce309c63fa323a91b6d56fc352ff76ec019b Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 11:34:07 +0200 Subject: [PATCH 04/24] improve --- .../models/llama/modeling_llama.py | 138 ++++++++++-------- 1 file changed, 74 insertions(+), 64 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 157de1c0651d..e575f76fbbbc 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -47,14 +47,27 @@ def __init__(self, is_causal: bool): self.cached_2d_tensor = None self.cached_4d_tensor = None - def __call__(self, attention_mask_2d: torch.Tensor, input_shape, past_key_values_length, dtype): + def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype): """ - Multiplies the given tensor x by -10,000. - If the cached tensor does not exist or has a different size, a new one is allocated. + Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, + key_value_length) shape and by adding a -10,000 bias to not-attended positions. If cached 4D attention mask can + be reused, no new memory will be allocated. 
""" - if self.cached_2d_tensor is None or (attention_mask_2d != self.cached_2d_tensor).any(): + # If 2d shape doesn't match or expected 4d shape doesn't match or 2d attention_mask values don't match, + # a new (bsz, head_dim=1, query_length, key_value_length) 4D mask is created and cached + if ( + self.cached_2d_tensor is None + or attention_mask_2d.shape != self.cached_2d_tensor.shape + or (query_length, key_value_length) != self.cached_4d_tensor.shape[-2:] + or (attention_mask_2d != self.cached_2d_tensor).any() + ): self.cached_2d_tensor = attention_mask_2d - self.cached_4d_tensor = self._prepare_decoder_attention_mask(attention_mask_2d, input_shape, past_key_values_length, dtype) + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length + + self.cached_4d_tensor = self._prepare_decoder_attention_mask( + attention_mask_2d, input_shape, past_key_values_length, dtype + ) return self.cached_4d_tensor @@ -63,8 +76,8 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_ # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask( + if input_shape[-1] > 1 and self.is_causal: + combined_attention_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask.device, @@ -73,7 +86,7 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_ if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, dtype, tgt_len=input_shape[-1]).to( + expanded_attn_mask = self._expand_mask(attention_mask, dtype, tgt_len=input_shape[-1]).to( attention_mask.device ) combined_attention_mask = ( @@ -82,6 +95,36 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_ return combined_attention_mask + # Copied from transformers.models.bart.modeling_bart._make_causal_mask + def _make_causal_mask( + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + ): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + # Copied from transformers.models.bart.modeling_bart._expand_mask + def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) if is_flash_attn_2_available(): @@ -94,9 +137,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_ _CONFIG_FOR_DOC = "LlamaConfig" -def _get_unpad_data(padding_mask): - seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32) - indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten() +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( @@ -106,39 +149,6 @@ def _get_unpad_data(padding_mask): ) -# Copied from transformers.models.bart.modeling_bart._make_causal_mask -def _make_causal_mask( - input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 -): - """ - Make causal mask used for bi-directional self-attention. - """ - bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) - mask_cond = torch.arange(mask.size(-1), device=device) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) - mask = mask.to(dtype) - - if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) - - -# Copied from transformers.models.bart.modeling_bart._expand_mask -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - - class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ @@ -366,7 +376,6 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() @@ -420,9 +429,9 @@ def forward( f" {attn_weights.size()}" ) - # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.attn_mask_converter(attention_mask) if attention_mask is not None: + # convert 2d -> 4d. 
Re-use cached mask if available + attention_mask = self.mask_converter(attention_mask, q_len, kv_seq_len, attn_weights.dtype) if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" @@ -466,11 +475,11 @@ class LlamaFlashAttention2(LlamaAttention): def forward( self, hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - attention_mask: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # LlamaFlashAttention2 attention does not support output_attentions output_attentions = False @@ -542,7 +551,7 @@ def forward( return attn_output, attn_weights, past_key_value def _flash_attention_forward( - self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token @@ -555,7 +564,7 @@ def _flash_attention_forward( Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API - padding_mask (`torch.Tensor`): + attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): @@ -564,10 +573,10 @@ def _flash_attention_forward( The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ # Contains at least one padding token in the sequence - if padding_mask is not None: + if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( - query_states, key_states, value_states, padding_mask, query_length + query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens @@ -594,8 +603,8 @@ def _flash_attention_forward( return attn_output - def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length): - indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask) + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( @@ -620,8 +629,8 @@ def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_l query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. 
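(Illustrative aside, not part of the diff: the unpad bookkeeping used by the varlen flash-attention path is easiest to see on a toy batch. The helper below mirrors `_get_unpad_data` from this patch series; the sample mask values are invented.)

import torch
import torch.nn.functional as F

def _get_unpad_data(attention_mask):
    # same logic as the helper used by _upad_input above
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch

mask = torch.tensor([[1, 1, 1, 0],
                     [1, 1, 0, 0]])
indices, cu_seqlens, max_len = _get_unpad_data(mask)
print(indices)     # tensor([0, 1, 2, 4, 5]): flattened positions of the real tokens
print(cu_seqlens)  # tensor([0, 3, 5], dtype=torch.int32): cumulative sequence lengths
print(max_len)     # 3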
- padding_mask = padding_mask[:, -query_length:] - query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask) + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, @@ -827,9 +836,14 @@ def __init__(self, config: LlamaConfig): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size - attn_mask_converter = AttentionMask2DTo4D(is_causal=True) + # create attn_mask converter that trickles down to each attention layer + # so that the attention_mask cache can be shared among layers + mask_converter = AttentionMask2DTo4D(is_causal=True) + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList([LlamaDecoderLayer(config, attn_mask_converter) for _ in range(config.num_hidden_layers)]) + self.layers = nn.ModuleList( + [LlamaDecoderLayer(config, mask_converter) for _ in range(config.num_hidden_layers)] + ) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False @@ -867,9 +881,9 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - batch_size, seq_length = input_ids.shape + seq_length = input_ids.shape[1] elif inputs_embeds is not None: - batch_size, seq_length, _ = inputs_embeds.shape + seq_length = inputs_embeds.shape[1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") @@ -890,10 +904,6 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions - attention_mask = self._prepare_decoder_attention_mask( - attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length - ) - hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: From e542a6d77c93a529d3827d2770eb2553077cb45b Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 09:43:08 +0000 Subject: [PATCH 05/24] rename --- .../models/llama/modeling_llama.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index e575f76fbbbc..63ad07fac4ea 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -44,8 +44,8 @@ class AttentionMask2DTo4D: def __init__(self, is_causal: bool): self.is_causal = is_causal - self.cached_2d_tensor = None - self.cached_4d_tensor = None + self.cached_2d_mask = None + self.cached_4d_mask = None def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype): """ @@ -56,20 +56,20 @@ def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value # If 2d shape doesn't match or expected 4d shape doesn't match or 2d attention_mask values don't match, # a new (bsz, head_dim=1, query_length, key_value_length) 4D mask is created and cached if ( - self.cached_2d_tensor is None - or attention_mask_2d.shape != self.cached_2d_tensor.shape - or (query_length, key_value_length) != self.cached_4d_tensor.shape[-2:] - or (attention_mask_2d != self.cached_2d_tensor).any() + self.cached_2d_mask is None + or attention_mask_2d.shape != self.cached_2d_mask.shape + or (query_length, key_value_length) != 
self.cached_4d_mask.shape[-2:] + or (attention_mask_2d != self.cached_2d_mask).any() ): - self.cached_2d_tensor = attention_mask_2d + self.cached_2d_mask = attention_mask_2d input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length - self.cached_4d_tensor = self._prepare_decoder_attention_mask( + self.cached_4d_mask = self._prepare_decoder_attention_mask( attention_mask_2d, input_shape, past_key_values_length, dtype ) - return self.cached_4d_tensor + return self.cached_4d_mask # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length, dtype): From 0256cd5e906514ec28009fb623f6140c83e44853 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 10:16:19 +0000 Subject: [PATCH 06/24] better cache --- src/transformers/models/llama/modeling_llama.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 63ad07fac4ea..a476ed48aa54 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -44,8 +44,7 @@ class AttentionMask2DTo4D: def __init__(self, is_causal: bool): self.is_causal = is_causal - self.cached_2d_mask = None - self.cached_4d_mask = None + self.cache = {} def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype): """ @@ -55,21 +54,16 @@ def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value """ # If 2d shape doesn't match or expected 4d shape doesn't match or 2d attention_mask values don't match, # a new (bsz, head_dim=1, query_length, key_value_length) 4D mask is created and cached - if ( - self.cached_2d_mask is None - or attention_mask_2d.shape != self.cached_2d_mask.shape - or (query_length, key_value_length) != self.cached_4d_mask.shape[-2:] - or (attention_mask_2d != self.cached_2d_mask).any() - ): - self.cached_2d_mask = attention_mask_2d + if attention_mask_2d not in self.cache: input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length - self.cached_4d_mask = self._prepare_decoder_attention_mask( + cached_4d_mask = self._prepare_decoder_attention_mask( attention_mask_2d, input_shape, past_key_values_length, dtype ) + self.cache[attention_mask_2d] = cached_4d_mask - return self.cached_4d_mask + return self.cache[attention_mask_2d] # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length, dtype): From 7e50ec0e0f2b3bd684c354a0170bf26dbf13b8be Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 10:37:53 +0000 Subject: [PATCH 07/24] renaming --- .../models/llama/modeling_llama.py | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index a476ed48aa54..936fc1560415 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -41,7 +41,7 @@ from .configuration_llama import LlamaConfig -class AttentionMask2DTo4D: +class AttentionMaskMapper: def __init__(self, is_causal: bool): self.is_causal = is_causal self.cache = {} 
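(A standalone sketch for context rather than patch content: the dict cache above is keyed by the 2D mask tensor itself, and tensor hashing in PyTorch is identity based, so a hit requires the very same tensor object. That is exactly the situation when one converter instance is shared by every decoder layer within a single forward pass. Names below are invented and only the padding expansion is shown:)

import torch

class TinyMaskCache:
    def __init__(self):
        # keyed by the 2D attention mask tensor; hits are identity based
        self.cache = {}

    def to_4d(self, mask_2d, dtype=torch.float32):
        if mask_2d not in self.cache:
            bsz, seq_len = mask_2d.shape
            expanded = mask_2d[:, None, None, :].expand(bsz, 1, seq_len, seq_len).to(dtype)
            inverted = 1.0 - expanded
            # 1 (attend) -> 0.0 bias, 0 (padding) -> large negative bias
            self.cache[mask_2d] = inverted.masked_fill(inverted.to(torch.bool), torch.finfo(dtype).min)
        return self.cache[mask_2d]

mask = torch.tensor([[1, 1, 0]])
cache = TinyMaskCache()
assert cache.to_4d(mask) is cache.to_4d(mask)               # same tensor object: cache hit
assert cache.to_4d(mask.clone()) is not cache.to_4d(mask)   # equal values, new object: cache miss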
@@ -52,6 +52,10 @@ def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value key_value_length) shape and by adding a -10,000 bias to not-attended positions. If cached 4D attention mask can be reused, no new memory will be allocated. """ + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache + if attention_mask_2d not in self.cache and len(self.cache) > 0: + self.cache = {} + # If 2d shape doesn't match or expected 4d shape doesn't match or 2d attention_mask values don't match, # a new (bsz, head_dim=1, query_length, key_value_length) 4D mask is created and cached if attention_mask_2d not in self.cache: @@ -309,7 +313,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: class LlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" - def __init__(self, config: LlamaConfig, mask_converter=None): + def __init__(self, config: LlamaConfig, attn_mask_4d_mapper=None): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -319,7 +323,7 @@ def __init__(self, config: LlamaConfig, mask_converter=None): self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta - self.mask_converter = mask_converter + self.attn_mask_4d_mapper = attn_mask_4d_mapper if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -425,7 +429,7 @@ def forward( if attention_mask is not None: # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.mask_converter(attention_mask, q_len, kv_seq_len, attn_weights.dtype) + attention_mask = self.attn_mask_4d_mapper(attention_mask, q_len, kv_seq_len, attn_weights.dtype) if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" @@ -586,7 +590,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.mask_converter.is_causal, + causal=self.attn_mask_4d_mapper.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -637,13 +641,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query class LlamaDecoderLayer(nn.Module): - def __init__(self, config: LlamaConfig, mask_converter=None): + def __init__(self, config: LlamaConfig, attn_mask_4d_mapper=None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - LlamaAttention(config=config, mask_converter=mask_converter) + LlamaAttention(config=config, attn_mask_4d_mapper=attn_mask_4d_mapper) if not getattr(config, "_flash_attn_2_enabled", False) - else LlamaFlashAttention2(config=config, mask_converter=mask_converter) + else LlamaFlashAttention2(config=config, attn_mask_4d_mapper=attn_mask_4d_mapper) ) self.mlp = LlamaMLP(config) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -830,13 +834,13 @@ def __init__(self, config: LlamaConfig): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size - # create attn_mask converter that trickles down to each attention layer + # create attn_mask mapper that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - mask_converter = AttentionMask2DTo4D(is_causal=True) + attn_mask_4d_mapper = AttentionMaskMapper(is_causal=True) 
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( - [LlamaDecoderLayer(config, mask_converter) for _ in range(config.num_hidden_layers)] + [LlamaDecoderLayer(config, attn_mask_4d_mapper) for _ in range(config.num_hidden_layers)] ) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) From 30fc4c3685f02d3117b713ffbc3fa26f6d5fc1b2 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 11:42:48 +0000 Subject: [PATCH 08/24] improve more --- .../models/llama/modeling_llama.py | 149 ++++++++++++------ 1 file changed, 98 insertions(+), 51 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 936fc1560415..f823c8330e40 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -41,57 +41,99 @@ from .configuration_llama import LlamaConfig -class AttentionMaskMapper: +class AttentionMaskCache: def __init__(self, is_causal: bool): self.is_causal = is_causal - self.cache = {} + self.cache_4d_mask = {} + self.cache_4d_mask_only_causal = {} + self.cache_has_mask = {} - def __call__(self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype): + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: + """ + Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. + """ + if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + self.cache_has_mask = {} + + if attention_mask_2d not in self.cache_has_mask: + self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + + return self.cache_has_mask[attention_mask_2d] + + def to_causal_4d( + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + ) -> torch.Tensor: + """ + Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative + bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new + memory will be allocated. + """ + expected_shape = (batch_size, 1, query_length, key_value_length) + + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache + if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: + self.cache_4d_mask_only_causal = {} + + # If shape is not cached, create a new causal mask and cache it + if expected_shape not in self.cache_4d_mask: + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + ) + + self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + + return self.cache_4d_mask_only_causal[expected_shape] + + def to_4d( + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, - key_value_length) shape and by adding a -10,000 bias to not-attended positions. 
If cached 4D attention mask can - be reused, no new memory will be allocated. + key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is + causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be + allocated. """ # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if attention_mask_2d not in self.cache and len(self.cache) > 0: - self.cache = {} + if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + self.cache_4d_mask = {} - # If 2d shape doesn't match or expected 4d shape doesn't match or 2d attention_mask values don't match, - # a new (bsz, head_dim=1, query_length, key_value_length) 4D mask is created and cached - if attention_mask_2d not in self.cache: + # If attention_mask is not cached, create a new one and cache it + if attention_mask_2d not in self.cache_4d_mask: input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length - cached_4d_mask = self._prepare_decoder_attention_mask( - attention_mask_2d, input_shape, past_key_values_length, dtype - ) - self.cache[attention_mask_2d] = cached_4d_mask - - return self.cache[attention_mask_2d] - - # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask - def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length, dtype): - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - combined_attention_mask = None - if input_shape[-1] > 1 and self.is_causal: - combined_attention_mask = self._make_causal_mask( - input_shape, - dtype, - device=attention_mask.device, - past_key_values_length=past_key_values_length, - ) + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + ) - if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = self._expand_mask(attention_mask, dtype, tgt_len=input_shape[-1]).to( - attention_mask.device - ) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device ) + cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - return combined_attention_mask + self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + + return self.cache_4d_mask[attention_mask_2d] # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( @@ -313,7 +355,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: class LlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" - def __init__(self, config: LlamaConfig, attn_mask_4d_mapper=None): + def __init__(self, config: LlamaConfig, attention_mask_cache=None): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -323,7 +365,7 @@ def __init__(self, config: LlamaConfig, attn_mask_4d_mapper=None): self.num_key_value_groups = self.num_heads // self.num_key_value_heads 
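(For a concrete picture, assume a single sequence of length 3 whose last position is padding; NEG stands in for `torch.finfo(dtype).min`. The causal and padding components that `to_4d` builds are simply summed and later added to the raw attention scores. This is an illustration, not patch code:)

import torch

NEG = torch.finfo(torch.float32).min  # the large negative bias used by the mask helpers

# causal component: query position i may only attend to key positions <= i
causal = torch.tensor([[0.0, NEG, NEG],
                       [0.0, 0.0, NEG],
                       [0.0, 0.0, 0.0]])

# padding component: key position 2 is a pad token, so it is masked for every query
padding = torch.tensor([[0.0, 0.0, NEG]]).expand(3, 3)

# the cached mask has shape (bsz, 1, q_len, kv_len) and is added to the attention scores
combined = (causal + padding)[None, None]
print(combined.shape)  # torch.Size([1, 1, 3, 3])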
self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta - self.attn_mask_4d_mapper = attn_mask_4d_mapper + self.attention_mask_cache = attention_mask_cache if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -429,7 +471,14 @@ def forward( if attention_mask is not None: # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.attn_mask_4d_mapper(attention_mask, q_len, kv_seq_len, attn_weights.dtype) + attention_mask = self.attention_mask_cache.to_4d(attention_mask, q_len, kv_seq_len, attn_weights.dtype) + elif self.attention_mask_cache.is_causal: + # create 4d causal mask. Re-use cached mask if available + attention_mask = self.attention_mask_cache.to_causal_4d( + bsz, q_len, kv_seq_len, attn_weights.dtype, attn_weights.device + ) + + if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" @@ -571,7 +620,7 @@ def _flash_attention_forward( The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ # Contains at least one padding token in the sequence - if attention_mask is not None: + if attention_mask is not None and self.attention_mask_cache.has_mask(attention_mask): batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length @@ -590,7 +639,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.attn_mask_4d_mapper.is_causal, + causal=self.attention_mask_cache.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -641,13 +690,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query class LlamaDecoderLayer(nn.Module): - def __init__(self, config: LlamaConfig, attn_mask_4d_mapper=None): + def __init__(self, config: LlamaConfig, attention_mask_cache=None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - LlamaAttention(config=config, attn_mask_4d_mapper=attn_mask_4d_mapper) + LlamaAttention(config=config, attention_mask_cache=attention_mask_cache) if not getattr(config, "_flash_attn_2_enabled", False) - else LlamaFlashAttention2(config=config, attn_mask_4d_mapper=attn_mask_4d_mapper) + else LlamaFlashAttention2(config=config, attention_mask_cache=attention_mask_cache) ) self.mlp = LlamaMLP(config) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -836,11 +885,11 @@ def __init__(self, config: LlamaConfig): # create attn_mask mapper that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - attn_mask_4d_mapper = AttentionMaskMapper(is_causal=True) + attention_mask_cache = AttentionMaskCache(is_causal=True) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( - [LlamaDecoderLayer(config, attn_mask_4d_mapper) for _ in range(config.num_hidden_layers)] + [LlamaDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] ) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -879,18 +928,15 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") 
elif input_ids is not None: - seq_length = input_ids.shape[1] + batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: - seq_length = inputs_embeds.shape[1] + batch_size, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") - seq_length_with_past = seq_length past_key_values_length = 0 - if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] - seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device @@ -901,6 +947,7 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + # embed positions hidden_states = inputs_embeds From 2301d6baebc7647ba83f880bffda5ea39c8bc14f Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 11:43:43 +0000 Subject: [PATCH 09/24] improve --- src/transformers/models/llama/modeling_llama.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index f823c8330e40..4478b6db515b 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -928,9 +928,9 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - batch_size, seq_length = input_ids.shape[:2] + _, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: - batch_size, seq_length = inputs_embeds.shape[:2] + _, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") From 4cfb7cb4136030bc915fddbc058ed059991a6f6a Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 11:51:52 +0000 Subject: [PATCH 10/24] fix bug --- src/transformers/models/llama/modeling_llama.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 4478b6db515b..42dffb1766ea 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -44,6 +44,7 @@ class AttentionMaskCache: def __init__(self, is_causal: bool): self.is_causal = is_causal + self.cache_4d_mask = {} self.cache_4d_mask_only_causal = {} self.cache_has_mask = {} @@ -75,7 +76,7 @@ def to_causal_4d( self.cache_4d_mask_only_causal = {} # If shape is not cached, create a new causal mask and cache it - if expected_shape not in self.cache_4d_mask: + if expected_shape not in self.cache_4d_mask_only_causal: input_shape = (batch_size, query_length) past_key_values_length = key_value_length - query_length From bdb39ae3bf78ceb948e34ce7af0bc9c3a1770605 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 16 Oct 2023 11:59:48 +0000 Subject: [PATCH 11/24] finalize --- src/transformers/models/llama/modeling_llama.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 42dffb1766ea..c79ab64d4dd6 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -42,6 +42,16 @@ class AttentionMaskCache: + """ + A utility attention mask class that allows: + - Create a causal mask 4d 
mask + - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + key_value_length) that can be multiplied with attention scores + - Check whether 2D attention mask has any padding tokens or not + + To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. + """ + def __init__(self, is_causal: bool): self.is_causal = is_causal @@ -884,7 +894,7 @@ def __init__(self, config: LlamaConfig): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size - # create attn_mask mapper that trickles down to each attention layer + # create attention mask cache that trickles down to each attention layer # so that the attention_mask cache can be shared among layers attention_mask_cache = AttentionMaskCache(is_causal=True) From 4387ab8d58f15c9cbc0d3c3f4e2065be6aa6b673 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 13:24:37 +0200 Subject: [PATCH 12/24] make style & make fix-copies --- .../open_llama/modeling_open_llama.py | 2 +- .../models/falcon/modeling_falcon.py | 24 +++---- .../models/llama/modeling_llama.py | 67 ++++++++++--------- .../models/mistral/modeling_mistral.py | 6 +- .../models/owlv2/modeling_owlv2.py | 1 + .../models/persimmon/modeling_persimmon.py | 1 - 6 files changed, 54 insertions(+), 47 deletions(-) diff --git a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py index 6853f5333f13..3ace323e8224 100644 --- a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py +++ b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py @@ -560,7 +560,7 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embed_tokens = value - # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index e9dca6df9894..de21c9fd1b59 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -76,9 +76,9 @@ def rotate_half(x): # Copied from transformers.models.llama.modeling_llama._get_unpad_data -def _get_unpad_data(padding_mask): - seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32) - indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten() +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( @@ -643,7 +643,7 @@ def forward( # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward def _flash_attention_forward( - self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain 
at least one padding token @@ -656,7 +656,7 @@ def _flash_attention_forward( Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API - padding_mask (`torch.Tensor`): + attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): @@ -665,10 +665,10 @@ def _flash_attention_forward( The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ # Contains at least one padding token in the sequence - if padding_mask is not None: + if attention_mask is not None and self.attention_mask_cache.has_mask(attention_mask): batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( - query_states, key_states, value_states, padding_mask, query_length + query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens @@ -684,7 +684,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=True, + causal=self.attention_mask_cache.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -696,8 +696,8 @@ def _flash_attention_forward( return attn_output # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input - def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length): - indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask) + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape key_layer = index_first_axis( @@ -722,8 +722,8 @@ def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_l query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. - padding_mask = padding_mask[:, -query_length:] - query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask) + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index a0aca90417ad..a94c5717d08e 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -19,6 +19,7 @@ # limitations under the License. """ PyTorch LLaMA model.""" import math +import warnings from typing import List, Optional, Tuple, Union import torch @@ -146,36 +147,21 @@ def to_4d( return self.cache_4d_mask[attention_mask_2d] - # Copied from transformers.models.bart.modeling_bart._make_causal_mask - def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 - ): - """ - Make causal mask used for bi-directional self-attention. 
- """ - bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) - mask_cond = torch.arange(mask.size(-1), device=device) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) - mask = mask.to(dtype) - - if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) - - # Copied from transformers.models.bart.modeling_bart._expand_mask - def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) if is_flash_attn_2_available(): @@ -427,7 +413,13 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, + **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + bsz, q_len, _ = hidden_states.size() if self.config.pretraining_tp > 1: @@ -538,8 +530,17 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, + **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # LlamaFlashAttention2 attention does not support output_attentions + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") + output_attentions = False bsz, q_len, _ = hidden_states.size() @@ -728,12 +729,13 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, + **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, sequence_length)` where padding elements are indicated by 0. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. @@ -742,6 +744,10 @@ def forward( (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) residual = hidden_states @@ -755,6 +761,7 @@ def forward( past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, + **kwargs, ) hidden_states = residual + hidden_states diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index cfef5a427118..68b58ce39fa4 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -54,9 +54,9 @@ # Copied from transformers.models.llama.modeling_llama._get_unpad_data -def _get_unpad_data(padding_mask): - seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32) - indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten() +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) return ( diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index 451cc4a69126..97e4ddb4af23 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1378,6 +1378,7 @@ def normalize_grid_corner_coordinates(self, feature_map: torch.FloatTensor): def objectness_predictor(self, image_features: torch.FloatTensor) -> torch.FloatTensor: """Predicts the probability that each image feature token is an object. + Args: image_features (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_dim)`)): Features extracted from the image. 
diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index a0bc57263823..d73cc4484484 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -548,7 +548,6 @@ class PersimmonModel(PersimmonPreTrainedModel): config: PersimmonConfig """ - # Copied from transformers.models.llama.modeling_llama.LlamaModel.__init__ with LLAMA->PERSIMMON,Llama->Persimmon,PersimmonRMSNorm->nn.LayerNorm,norm->final_layernorm,rms_final_layernorm_eps->layer_norm_eps def __init__(self, config: PersimmonConfig): super().__init__(config) self.padding_idx = config.pad_token_id From 068ed57ecd251032c247bbb6f97a89de552ea79c Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 13:29:08 +0200 Subject: [PATCH 13/24] correct more --- .../models/llama/modeling_llama.py | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index a94c5717d08e..fcd97978fedd 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -42,6 +42,28 @@ from .configuration_llama import LlamaConfig +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "LlamaConfig" + + +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + class AttentionMaskCache: """ A utility attention mask class that allows: @@ -147,43 +169,21 @@ def to_4d( return self.cache_4d_mask[attention_mask_2d] -def _make_causal_mask( - input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 -): - """ - Make causal mask used for bi-directional self-attention. 
- """ - bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) - mask_cond = torch.arange(mask.size(-1), device=device) - mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) - mask = mask.to(dtype) - - if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) - - -if is_flash_attn_2_available(): - from flash_attn import flash_attn_func, flash_attn_varlen_func - from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "LlamaConfig" - - -def _get_unpad_data(attention_mask): - seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) - indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() - max_seqlen_in_batch = seqlens_in_batch.max().item() - cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)) - return ( - indices, - cu_seqlens, - max_seqlen_in_batch, - ) + def _make_causal_mask( + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + ): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) class LlamaRMSNorm(nn.Module): From d18268a80ffc29d9473b44556cc81a0452ccbe9d Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 13:42:16 +0200 Subject: [PATCH 14/24] start moving attention_mask --- .../models/falcon/modeling_falcon.py | 145 ++++++++++++-- .../models/mistral/modeling_mistral.py | 177 ++++++++++++++---- 2 files changed, 270 insertions(+), 52 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index de21c9fd1b59..2a4de97eeb2c 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -88,6 +88,129 @@ def _get_unpad_data(attention_mask): ) +# Copied from transformers.models.llama.modeling_llama.AttentionMaskCache +class AttentionMaskCache: + """ + A utility attention mask class that allows: + - Create a causal mask 4d mask + - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + key_value_length) that can be multiplied with attention scores + - Check whether 2D attention mask has any padding tokens or not + + To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. + """ + + def __init__(self, is_causal: bool): + self.is_causal = is_causal + + self.cache_4d_mask = {} + self.cache_4d_mask_only_causal = {} + self.cache_has_mask = {} + + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: + """ + Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. 
+ """ + if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + self.cache_has_mask = {} + + if attention_mask_2d not in self.cache_has_mask: + self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + + return self.cache_has_mask[attention_mask_2d] + + def to_causal_4d( + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + ) -> torch.Tensor: + """ + Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative + bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new + memory will be allocated. + """ + expected_shape = (batch_size, 1, query_length, key_value_length) + + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache + if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: + self.cache_4d_mask_only_causal = {} + + # If shape is not cached, create a new causal mask and cache it + if expected_shape not in self.cache_4d_mask_only_causal: + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + ) + + self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + + return self.cache_4d_mask_only_causal[expected_shape] + + def to_4d( + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + ) -> torch.Tensor: + """ + Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, + key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is + causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be + allocated. 
+ """ + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache + if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + self.cache_4d_mask = {} + + # If attention_mask is not cached, create a new one and cache it + if attention_mask_2d not in self.cache_4d_mask: + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + ) + + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + + self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + + return self.cache_4d_mask[attention_mask_2d] + + def _make_causal_mask( + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + ): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + # TODO (joao): Is this the same implementation as in Llama? If so, let's make them the same and add the copy facilities class FalconRotaryEmbedding(nn.Module): """Implementation of RotaryEmbedding from GPT-NeoX. 
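To make the intended call pattern concrete, here is a minimal usage sketch of the shared cache (my own illustration, not code from the patch; it assumes the `AttentionMaskCache` class added above is in scope and only exercises the padding-free, purely causal path):

    import torch

    # one cache instance per model, shared by every decoder layer
    cache = AttentionMaskCache(is_causal=True)

    bsz, q_len, kv_len = 2, 4, 4
    mask_2d = torch.ones(bsz, kv_len, dtype=torch.long)

    # no 0 entries, so there is no padding to account for
    assert not cache.has_mask(mask_2d)

    # (bsz, 1, q_len, kv_len) tensor: 0 on and below the diagonal,
    # the dtype's minimum value (a large negative bias) above it
    causal_4d = cache.to_causal_4d(bsz, q_len, kv_len, torch.float32, mask_2d.device)

    # an identical shape request is served from the cache: the very same tensor comes back
    assert cache.to_causal_4d(bsz, q_len, kv_len, torch.float32, mask_2d.device) is causal_4d
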
@@ -254,6 +377,7 @@ def _expand_mask(mask: torch.Tensor, past_key_values_length: int) -> torch.BoolT def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: + attention_mask = attention_mask if attention_mask is not None batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( @@ -431,7 +555,6 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, - padding_mask: Optional[torch.LongTensor] = None, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads @@ -563,7 +686,6 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, - padding_mask: Optional[torch.LongTensor] = None, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads @@ -630,7 +752,7 @@ def forward( value_layer = value_layer.to(target_dtype) attn_output = self._flash_attention_forward( - query_layer, key_layer, value_layer, padding_mask, query_length, dropout=attn_dropout + query_layer, key_layer, value_layer, attention_mask, query_length, dropout=attn_dropout ) attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) @@ -786,7 +908,6 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, - padding_mask: Optional[torch.LongTensor] = None, ): residual = hidden_states @@ -806,7 +927,6 @@ def forward( head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, - padding_mask=padding_mask, ) attention_output = attn_outputs[0] @@ -1114,19 +1234,10 @@ def forward( past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[1] # 1 because RW-cache, not standard format - if attention_mask is None: - attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=hidden_states.device) - padding_mask = None - else: - attention_mask = attention_mask.to(hidden_states.device) - - if 0 in attention_mask: - padding_mask = attention_mask - else: - padding_mask = None if self.use_alibi: - alibi = build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype) + mask = torch.ones(input_ids, shape=(batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long) if attention_mask is None else attention_mask + alibi = build_alibi_tensor(mask, self.num_heads, dtype=hidden_states.dtype) else: alibi = None if position_ids is None: @@ -1162,7 +1273,6 @@ def custom_forward(*inputs): causal_mask, position_ids, head_mask[i], - padding_mask, ) else: outputs = block( @@ -1174,7 +1284,6 @@ def custom_forward(*inputs): use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, - padding_mask=padding_mask, ) hidden_states = outputs[0] diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 68b58ce39fa4..c002d9f5f184 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -53,6 +53,130 @@ _CONFIG_FOR_DOC = "MistralConfig" +# Copied from transformers.models.llama.modeling_llama.AttentionMaskCache 
+class AttentionMaskCache: + """ + A utility attention mask class that allows: + - Create a causal mask 4d mask + - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + key_value_length) that can be multiplied with attention scores + - Check whether 2D attention mask has any padding tokens or not + + To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. + """ + + def __init__(self, is_causal: bool): + self.is_causal = is_causal + + self.cache_4d_mask = {} + self.cache_4d_mask_only_causal = {} + self.cache_has_mask = {} + + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: + """ + Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. + """ + if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + self.cache_has_mask = {} + + if attention_mask_2d not in self.cache_has_mask: + self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + + return self.cache_has_mask[attention_mask_2d] + + def to_causal_4d( + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + ) -> torch.Tensor: + """ + Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative + bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new + memory will be allocated. + """ + expected_shape = (batch_size, 1, query_length, key_value_length) + + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache + if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: + self.cache_4d_mask_only_causal = {} + + # If shape is not cached, create a new causal mask and cache it + if expected_shape not in self.cache_4d_mask_only_causal: + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + ) + + self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + + return self.cache_4d_mask_only_causal[expected_shape] + + def to_4d( + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + ) -> torch.Tensor: + """ + Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, + key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is + causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be + allocated. 
+ """ + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache + if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + self.cache_4d_mask = {} + + # If attention_mask is not cached, create a new one and cache it + if attention_mask_2d not in self.cache_4d_mask: + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length + + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 and self.is_causal: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + ) + + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + + self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + + return self.cache_4d_mask[attention_mask_2d] + + + def _make_causal_mask( + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + ): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) @@ -251,7 +375,6 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - padding_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() @@ -332,7 +455,6 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - padding_mask: Optional[torch.LongTensor] = None, ): bsz, q_len, _ = hidden_states.size() @@ -385,9 +507,9 @@ def forward( past_key_value = (past_key, past_value) - if padding_mask is not None: - padding_mask = padding_mask[:, slicing_tokens:] - padding_mask = torch.cat([padding_mask, torch.ones_like(padding_mask[:, -1:])], dim=-1) + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) @@ -433,7 +555,7 @@ def forward( query_states, key_states, value_states, - padding_mask, + attention_mask, q_len, dropout=dropout_rate, use_sliding_windows=use_sliding_windows, @@ -452,7 +574,7 @@ def _flash_attention_forward( query_states, key_states, value_states, - padding_mask, + attention_mask, query_length, dropout=0.0, 
softmax_scale=None, @@ -469,7 +591,7 @@ def _flash_attention_forward( Input key states to be passed to Flash Attention API value_states (`torch.Tensor`): Input value states to be passed to Flash Attention API - padding_mask (`torch.Tensor`): + attention_mask (`torch.Tensor`): The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the position of padding tokens and 1 for the position of non-padding tokens. dropout (`int`, *optional*): @@ -480,10 +602,10 @@ def _flash_attention_forward( Whether to activate sliding window attention. """ # Contains at least one padding token in the sequence - if padding_mask is not None: + if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( - query_states, key_states, value_states, padding_mask, query_length + query_states, key_states, value_states, attention_mask, query_length ) cu_seqlens_q, cu_seqlens_k = cu_seq_lens @@ -513,7 +635,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=True, + causal=self.attention_mask_cache.is_causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) @@ -536,16 +658,16 @@ def _flash_attention_forward( return attn_output - def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length): + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape # On the first iteration we need to properly re-create the padding mask # by slicing it on the proper place - if kv_seq_len != padding_mask.shape[-1]: - padding_mask_num_tokens = padding_mask.shape[-1] - padding_mask = padding_mask[:, padding_mask_num_tokens - kv_seq_len :] + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] - indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(padding_mask) + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) @@ -566,8 +688,8 @@ def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_l query_layer = query_layer.squeeze(1) else: # The -q_len: slice assumes left padding. 
- padding_mask = padding_mask[:, -query_length:] - query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, padding_mask) + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) return ( query_layer, @@ -600,7 +722,6 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, - padding_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: @@ -628,7 +749,6 @@ def forward( past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, - padding_mask=padding_mask, ) hidden_states = residual + hidden_states @@ -865,22 +985,12 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) - padding_mask = None - - # embed positions - if attention_mask is None: - attention_mask = torch.ones( - (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device - ) - elif 0 in attention_mask: - padding_mask = attention_mask - if ( - padding_mask is not None + attention_mask is not None and hasattr(self.config, "_flash_attn_2_enabled") and self.config._flash_attn_2_enabled ): - is_padding_right = padding_mask[:, -1].sum().item() != batch_size + is_padding_right = attention_mask[:, -1].sum().item() != batch_size if is_padding_right: raise ValueError( "You are attempting to perform batched generation with padding_side='right'" @@ -921,7 +1031,7 @@ def forward( def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value - return module(*inputs, past_key_value, output_attentions, padding_mask=padding_mask) + return module(*inputs, past_key_value, output_attentions) return custom_forward @@ -939,7 +1049,6 @@ def custom_forward(*inputs): past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, - padding_mask=padding_mask, ) hidden_states = layer_outputs[0] From 4a99e4362047ae3e417888c49160bd52dbdef8bd Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 15:26:13 +0200 Subject: [PATCH 15/24] fix llama --- src/transformers/models/llama/modeling_llama.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index fcd97978fedd..85ca9426352d 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -185,6 +185,19 @@ def _make_causal_mask( mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): From 2fe66a0a45729abc640b0558ca953f5c18081610 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 15:32:52 +0200 Subject: [PATCH 16/24] improve falcon --- .../models/falcon/modeling_falcon.py | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 2a4de97eeb2c..b126f44e9ae2 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -377,7 +377,6 @@ def _expand_mask(mask: torch.Tensor, past_key_values_length: int) -> torch.BoolT def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: - attention_mask = attention_mask if attention_mask is not None batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( @@ -426,7 +425,7 @@ def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: class FalconAttention(nn.Module): - def __init__(self, config: FalconConfig): + def __init__(self, config: FalconConfig, attention_mask_cache=None): super().__init__() self.config = config @@ -435,6 +434,7 @@ def __init__(self, config: FalconConfig): self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout + self.attention_mask_cache = attention_mask_cache if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( @@ -588,8 +588,14 @@ def forward( else: present = None - float_min = torch.finfo(query_layer.dtype).min - attention_mask_float = (attention_mask * 1.0).masked_fill(attention_mask, float_min).to(query_layer.dtype) + if attention_mask is not None: + # convert 2d -> 4d. Re-use cached mask if available + attention_mask = self.attention_mask_cache.to_4d(attention_mask, query_length, kv_length, query_layer.dtype) + elif self.attention_mask_cache.is_causal: + # create 4d causal mask. 
Re-use cached mask if available + attention_mask = self.attention_mask_cache.to_causal_4d( + batch_size, query_length, kv_length, query_layer.dtype, query_layer.device + ) query_layer_ = query_layer.reshape(batch_size, self.num_heads, -1, self.head_dim) key_layer_ = key_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim) @@ -605,7 +611,7 @@ def forward( ) attn_output = F.scaled_dot_product_attention( - query_layer_, key_layer_, value_layer_, attention_mask_float, 0.0, is_causal=False + query_layer_, key_layer_, value_layer_, attention_mask, 0.0, is_causal=False ) attention_scores = None else: @@ -613,7 +619,7 @@ def forward( attention_scores /= math.sqrt(self.head_dim) attention_scores = F.softmax( - attention_scores + attention_mask_float, dim=-1, dtype=hidden_states.dtype + attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype ) attn_output = attention_scores @ value_layer_ @@ -640,12 +646,12 @@ def forward( if input_dtype == torch.float16 or input_dtype == torch.bfloat16: attention_scores = attention_scores.to(torch.float32) # Matt (HF) note: We could possibly use F.scaled_dot_product_attention here too, by - # adding (alibi * self.inv_norm_factor) to attention_mask_float. I think this would be mathematically + # adding (alibi * self.inv_norm_factor) to attention_mask. I think this would be mathematically # equivalent and more performant, but there might be a numerical difference. If you're reading this # and you'd like to experiment and maybe file a PR, feel free! attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1) attention_logits *= self.inv_norm_factor - attention_probs = F.softmax(attention_logits + attention_mask_float, dim=-1, dtype=hidden_states.dtype) + attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype) # [batch_size, num_heads, q_length, kv_length] attention_probs = self.attention_dropout(attention_probs) @@ -874,15 +880,15 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class FalconDecoderLayer(nn.Module): - def __init__(self, config: FalconConfig): + def __init__(self, config: FalconConfig, attention_mask_cache=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = ( - FalconAttention(config) + FalconAttention(config, attention_mask_cache=attention_mask_cache) if not getattr(config, "_flash_attn_2_enabled", False) - else FalconFlashAttention2(config) + else FalconFlashAttention2(config, attention_mask_cache=attention_mask_cache) ) self.mlp = FalconMLP(config) self.hidden_dropout = config.hidden_dropout @@ -1121,8 +1127,12 @@ def __init__(self, config: FalconConfig): # Embedding + LN Embedding self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) + # create attention mask cache that trickles down to each attention layer + # so that the attention_mask cache can be shared among layers + attention_mask_cache = AttentionMaskCache(is_causal=True) + # Transformer blocks - self.h = nn.ModuleList([FalconDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.h = nn.ModuleList([FalconDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)]) # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) From d03d8a141280f26d3643e29f1a1f116f08c5e493 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 13:57:44 +0000 Subject: [PATCH 17/24] up --- .../models/falcon/modeling_falcon.py | 43 +++++++++++++------ 
.../models/mistral/modeling_mistral.py | 40 ++++++++++++++--- 2 files changed, 63 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index b126f44e9ae2..733c913e7710 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -210,6 +210,19 @@ def _make_causal_mask( mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + # TODO (joao): Is this the same implementation as in Llama? If so, let's make them the same and add the copy facilities class FalconRotaryEmbedding(nn.Module): @@ -590,7 +603,9 @@ def forward( if attention_mask is not None: # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_4d(attention_mask, query_length, kv_length, query_layer.dtype) + attention_mask = self.attention_mask_cache.to_4d( + attention_mask, query_length, kv_length, query_layer.dtype + ) elif self.attention_mask_cache.is_causal: # create 4d causal mask. Re-use cached mask if available attention_mask = self.attention_mask_cache.to_causal_4d( @@ -618,9 +633,7 @@ def forward( attention_scores = query_layer_ @ key_layer_.transpose(-1, -2) attention_scores /= math.sqrt(self.head_dim) - attention_scores = F.softmax( - attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype - ) + attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype) attn_output = attention_scores @ value_layer_ attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim) @@ -1132,7 +1145,9 @@ def __init__(self, config: FalconConfig): attention_mask_cache = AttentionMaskCache(is_causal=True) # Transformer blocks - self.h = nn.ModuleList([FalconDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)]) + self.h = nn.ModuleList( + [FalconDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] + ) # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) @@ -1246,7 +1261,13 @@ def forward( past_key_values_length = past_key_values[0][0].shape[1] # 1 because RW-cache, not standard format if self.use_alibi: - mask = torch.ones(input_ids, shape=(batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long) if attention_mask is None else attention_mask + mask = ( + torch.ones( + (batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long + ) + if attention_mask is None + else attention_mask + ) alibi = build_alibi_tensor(mask, self.num_heads, dtype=hidden_states.dtype) else: alibi = None @@ -1257,12 +1278,6 @@ def forward( ) position_ids = position_ids.unsqueeze(0) - causal_mask = self._prepare_attn_mask( - attention_mask, - input_shape=(batch_size, seq_length), - 
past_key_values_length=past_key_values_length, - ) - for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) @@ -1280,7 +1295,7 @@ def custom_forward(*inputs): create_custom_forward(block), hidden_states, alibi, - causal_mask, + attention_mask, position_ids, head_mask[i], ) @@ -1288,7 +1303,7 @@ def custom_forward(*inputs): outputs = block( hidden_states, layer_past=layer_past, - attention_mask=causal_mask, + attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index c002d9f5f184..596504869240 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -159,7 +159,6 @@ def to_4d( return self.cache_4d_mask[attention_mask_2d] - def _make_causal_mask( self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): @@ -176,6 +175,19 @@ def _make_causal_mask( mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + # Copied from transformers.models.llama.modeling_llama._get_unpad_data def _get_unpad_data(attention_mask): @@ -337,7 +349,7 @@ class MistralAttention(nn.Module): and "Generating Long Sequences with Sparse Transformers". """ - def __init__(self, config: MistralConfig): + def __init__(self, config: MistralConfig, attention_mask_cache=None): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -347,6 +359,7 @@ def __init__(self, config: MistralConfig): self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta + self.attention_mask_cache = attention_mask_cache if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -411,6 +424,15 @@ def forward( f" {attn_weights.size()}" ) + if attention_mask is not None: + # convert 2d -> 4d. Re-use cached mask if available + attention_mask = self.attention_mask_cache.to_4d(attention_mask, q_len, kv_seq_len, attn_weights.dtype) + elif self.attention_mask_cache.is_causal: + # create 4d causal mask. 
Re-use cached mask if available + attention_mask = self.attention_mask_cache.to_causal_4d( + bsz, q_len, kv_seq_len, attn_weights.dtype, attn_weights.device + ) + if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( @@ -702,13 +724,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query class MistralDecoderLayer(nn.Module): - def __init__(self, config: MistralConfig): + def __init__(self, config: MistralConfig, attention_mask_cache=None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - MistralAttention(config=config) + MistralAttention(config=config, attention_mask_cache=attention_mask_cache) if not getattr(config, "_flash_attn_2_enabled", False) - else MistralFlashAttention2(config) + else MistralFlashAttention2(config, attention_mask_cache=attention_mask_cache) ) self.mlp = MistralMLP(config) self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -895,8 +917,14 @@ def __init__(self, config: MistralConfig): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size + # create attention mask cache that trickles down to each attention layer + # so that the attention_mask cache can be shared among layers + attention_mask_cache = AttentionMaskCache(is_causal=True) + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList([MistralDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.layers = nn.ModuleList( + [MistralDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] + ) self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False From 9e553ac7c7754fb5b8717b6e09698943b8e1bcff Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 18:42:45 +0000 Subject: [PATCH 18/24] improve more --- .../models/falcon/modeling_falcon.py | 60 ++++-- .../models/llama/modeling_llama.py | 60 ++++-- .../models/mistral/modeling_mistral.py | 60 ++++-- tests/models/llama/test_modeling_llama.py | 181 +++++++++++++++++- 4 files changed, 318 insertions(+), 43 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 733c913e7710..077ca5e4b32b 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -98,29 +98,43 @@ class AttentionMaskCache: - Check whether 2D attention mask has any padding tokens or not To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. + + Parameters: + is_causal (`bool`): + Whether the attention mask should be a uni-directional (causal) or bi-directional mask. + + sliding_window (`int`, *optional*): + Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ - def __init__(self, is_causal: bool): + def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal + self.sliding_window = sliding_window self.cache_4d_mask = {} self.cache_4d_mask_only_causal = {} self.cache_has_mask = {} + def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): + # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key + # object to be certain to not accidentally return an incorrect hashed key (e.g. 
if the tensor has been updated in-place, only the version is increased) + return (id(tensor), tensor._version, tensor.shape + shape) + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: """ Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. """ - if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + mask_2d_hash = self._hash_tensor(attention_mask_2d) + if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: self.cache_has_mask = {} - if attention_mask_2d not in self.cache_has_mask: - self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + if mask_2d_hash not in self.cache_has_mask: + self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - return self.cache_has_mask[attention_mask_2d] + return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -129,6 +143,9 @@ def to_causal_4d( """ expected_shape = (batch_size, 1, query_length, key_value_length) + if not self.is_causal: + raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: self.cache_4d_mask_only_causal = {} @@ -141,13 +158,14 @@ def to_causal_4d( # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if input_shape[-1] > 1 or self.sliding_window is not None: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask @@ -155,7 +173,7 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -163,26 +181,31 @@ def to_4d( causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be allocated. 
""" + mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: self.cache_4d_mask = {} # If attention_mask is not cached, create a new one and cache it - if attention_mask_2d not in self.cache_4d_mask: + if mask_2d_hash not in self.cache_4d_mask: input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( @@ -190,12 +213,12 @@ def to_4d( ) cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + self.cache_4d_mask[mask_2d_hash] = cached_4d_mask - return self.cache_4d_mask[attention_mask_2d] + return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None ): """ Make causal mask used for bi-directional self-attention. @@ -204,10 +227,19 @@ def _make_causal_mask( mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + + # add lower triangular sliding window mask if necessary + if sliding_window is not None: + diagonal = past_key_values_length - sliding_window + 1 + + context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 85ca9426352d..43d8a673b59d 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -73,29 +73,43 @@ class AttentionMaskCache: - Check whether 2D attention mask has any padding tokens or not To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. 
+ + Parameters: + is_causal (`bool`): + Whether the attention mask should be a uni-directional (causal) or bi-directional mask. + + sliding_window (`int`, *optional*): + Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ - def __init__(self, is_causal: bool): + def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal + self.sliding_window = sliding_window self.cache_4d_mask = {} self.cache_4d_mask_only_causal = {} self.cache_has_mask = {} + def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): + # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key + # object to be certain to not accidentally return an incorrect hashed key (e.g. if the tensor has been updated in-place, only the version is increased) + return (id(tensor), tensor._version, tensor.shape + shape) + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: """ Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. """ - if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + mask_2d_hash = self._hash_tensor(attention_mask_2d) + if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: self.cache_has_mask = {} - if attention_mask_2d not in self.cache_has_mask: - self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + if mask_2d_hash not in self.cache_has_mask: + self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - return self.cache_has_mask[attention_mask_2d] + return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -104,6 +118,9 @@ def to_causal_4d( """ expected_shape = (batch_size, 1, query_length, key_value_length) + if not self.is_causal: + raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: self.cache_4d_mask_only_causal = {} @@ -116,13 +133,14 @@ def to_causal_4d( # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if input_shape[-1] > 1 or self.sliding_window is not None: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask @@ -130,7 +148,7 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -138,26 +156,31 @@ def 
to_4d( causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be allocated. """ + mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: self.cache_4d_mask = {} # If attention_mask is not cached, create a new one and cache it - if attention_mask_2d not in self.cache_4d_mask: + if mask_2d_hash not in self.cache_4d_mask: input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( @@ -165,12 +188,12 @@ def to_4d( ) cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + self.cache_4d_mask[mask_2d_hash] = cached_4d_mask - return self.cache_4d_mask[attention_mask_2d] + return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None ): """ Make causal mask used for bi-directional self-attention. 
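One point worth spelling out about `_hash_tensor`: keying the cache on `id()` alone would silently return a stale 4D mask if the same mask tensor were later modified in place, which is why the tensor's `_version` counter and shape are folded into the key. A small sketch (illustration only; it assumes the completed `AttentionMaskCache` above is in scope and uses a regular eager-mode tensor, whose version counter is tracked):

    import torch

    cache = AttentionMaskCache(is_causal=True)
    mask_2d = torch.ones(1, 4, dtype=torch.long)

    mask_4d_a = cache.to_4d(mask_2d, query_length=4, key_value_length=4)

    # in-place edit: same object and shape, but the version counter is bumped,
    # so the hash key changes and the old cached entry can no longer be hit
    mask_2d[0, 0] = 0

    mask_4d_b = cache.to_4d(mask_2d, query_length=4, key_value_length=4)
    assert mask_4d_b is not mask_4d_a  # the stale mask is recomputed, not reused
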
@@ -179,10 +202,19 @@ def _make_causal_mask( mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + + # add lower triangular sliding window mask if necessary + if sliding_window is not None: + diagonal = past_key_values_length - sliding_window + 1 + + context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 596504869240..d3eae5113eee 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -63,29 +63,43 @@ class AttentionMaskCache: - Check whether 2D attention mask has any padding tokens or not To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. + + Parameters: + is_causal (`bool`): + Whether the attention mask should be a uni-directional (causal) or bi-directional mask. + + sliding_window (`int`, *optional*): + Optionally, the sliding window masks can be created if `sliding_window` is defined to a positive integer. """ - def __init__(self, is_causal: bool): + def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal + self.sliding_window = sliding_window self.cache_4d_mask = {} self.cache_4d_mask_only_causal = {} self.cache_has_mask = {} + def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): + # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key + # object to be certain to not accidentally return an incorrect hashed key (e.g. if the tensor has been updated in-place, only the version is increased) + return (id(tensor), tensor._version, tensor.shape + shape) + def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: """ Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. 
""" - if attention_mask_2d not in self.cache_has_mask and len(self.cache_has_mask) > 0: + mask_2d_hash = self._hash_tensor(attention_mask_2d) + if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: self.cache_has_mask = {} - if attention_mask_2d not in self.cache_has_mask: - self.cache_has_mask[attention_mask_2d] = 0 in attention_mask_2d + if mask_2d_hash not in self.cache_has_mask: + self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - return self.cache_has_mask[attention_mask_2d] + return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: torch.device + self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -94,6 +108,9 @@ def to_causal_4d( """ expected_shape = (batch_size, 1, query_length, key_value_length) + if not self.is_causal: + raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") + # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: self.cache_4d_mask_only_causal = {} @@ -106,13 +123,14 @@ def to_causal_4d( # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if input_shape[-1] > 1 or self.sliding_window is not None: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask @@ -120,7 +138,7 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype + self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -128,26 +146,31 @@ def to_4d( causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be allocated. 
""" + mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) + # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if attention_mask_2d not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: + if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: self.cache_4d_mask = {} # If attention_mask is not cached, create a new one and cache it - if attention_mask_2d not in self.cache_4d_mask: + if mask_2d_hash not in self.cache_4d_mask: input_shape = (attention_mask_2d.shape[0], query_length) past_key_values_length = key_value_length - query_length # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] causal_4d_mask = None - if input_shape[-1] > 1 and self.is_causal: + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask( input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( @@ -155,12 +178,12 @@ def to_4d( ) cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - self.cache_4d_mask[attention_mask_2d] = cached_4d_mask + self.cache_4d_mask[mask_2d_hash] = cached_4d_mask - return self.cache_4d_mask[attention_mask_2d] + return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 + self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None ): """ Make causal mask used for bi-directional self-attention. 
@@ -169,10 +192,19 @@ def _make_causal_mask( mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + + # add lower triangular sliding window mask if necessary + if sliding_window is not None: + diagonal = past_key_values_length - sliding_window + 1 + + context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(self, mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index 2402986900fd..e23d3a7edea6 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -39,7 +39,186 @@ LlamaModel, LlamaTokenizer, ) - + from transformers.models.llama.modeling_llama import AttentionMaskCache + + +class AttentionMaskTester(unittest.TestCase): + + def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): + mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) + mask_4d_values = mask_4d[:, 0][mask_indices] + is_inf = (mask_4d_values == -float("inf")) + is_min = (mask_4d_values == torch.finfo(mask_4d.dtype).min) + assert torch.logical_or(is_inf, is_min).all() + + def check_to_4d(self, mask_cache, q_len, kv_len, additional_mask=None, bsz=3): + mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) + + if additional_mask is not None: + for bsz_idx, seq_idx in additional_mask: + mask_2d[bsz_idx, seq_idx] = 0 + + mask_4d = mask_cache.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len) + + # check cache + hash_key = mask_cache._hash_tensor(mask_2d, (q_len, kv_len)) + assert hash_key in mask_cache.cache_4d_mask + assert mask_cache.cache_4d_mask[hash_key] is mask_4d + + assert mask_4d.shape == (bsz, 1, q_len, kv_len) + + context = mask_cache.sliding_window + if mask_cache.is_causal and context is None: + # k * (k+1) / 2 tokens are masked in triangualar masks + num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) + + if 0 not in mask_2d: + assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked + if 0 in mask_2d: + # at least causal mask + maybe more + assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked + self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) + elif not mask_cache.is_causal and context is None: + if 0 not in mask_2d: + assert (mask_4d != 0).sum().cpu().item() == 0 + if 0 in mask_2d: + self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) + elif mask_cache.is_causal and context is not None: + # k * (k+1) / 2 tokens are masked in triangualar masks + num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) + num_tokens_masked = bsz * num_tokens_masked + + if 0 not in mask_2d: + assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked + if 0 in mask_2d: + # at least causal mask + maybe more + assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked + self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) + + def check_to_causal(self, mask_cache, q_len, kv_len, bsz=3): + mask_4d = 
mask_cache.to_causal_4d(bsz, query_length=q_len, key_value_length=kv_len, device=torch_device) + + if q_len == 1 and mask_cache.sliding_window is None: + # no causal mask if q_len is 1 + assert mask_4d is None + return + + # check cache + mask_2d_shape = (bsz, 1, q_len, kv_len) + assert mask_2d_shape in mask_cache.cache_4d_mask_only_causal + assert mask_cache.cache_4d_mask_only_causal[mask_2d_shape] is mask_4d + + context = mask_cache.sliding_window + if mask_cache.is_causal and context is None: + # k * (k+1) / 2 tokens are masked in triangualar masks + num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) + + assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked + elif not mask_cache.is_causal and context is None: + assert (mask_4d != 0).sum().cpu().item() == 0 + elif mask_cache.is_causal and context is not None: + # k * (k+1) / 2 tokens are masked in triangualar masks + num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) + num_tokens_masked = bsz * num_tokens_masked + + assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked + + def compute_num_context_mask(self, kv_len, context, q_len): + # This function computes the # of attention tokens that are added for + # the sliding window + c_mask_len = kv_len - context + num_mask_triangle = (c_mask_len * (c_mask_len + 1) // 2) + cut_mask_len = max(c_mask_len - q_len, 0) + num_cut_mask = (cut_mask_len * (cut_mask_len + 1) // 2) + return num_mask_triangle - num_cut_mask + + + def test_2d_to_4d_causal(self): + mask_cache = AttentionMaskCache(is_causal=True) + + # auto-regressive use case + self.check_to_4d(mask_cache, q_len=1, kv_len=7) + # special auto-regressive case + self.check_to_4d(mask_cache, q_len=3, kv_len=7) + # non auto-regressive case + self.check_to_4d(mask_cache, q_len=7, kv_len=7) + + # same with extra attention masks + self.check_to_4d(mask_cache, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_cache, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + + def test_2d_to_4d(self): + mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + mask_cache = AttentionMaskCache(is_causal=False) + + # non auto-regressive case + self.check_to_4d(mask_cache, q_len=7, kv_len=7) + + # same with extra attention masks + self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + + def test_2d_to_4d_causal_sliding(self): + mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + mask_cache = AttentionMaskCache(is_causal=True, sliding_window=5) + + # auto-regressive use case + self.check_to_4d(mask_cache, q_len=1, kv_len=7) + # special auto-regressive case + self.check_to_4d(mask_cache, q_len=3, kv_len=7) + # non auto-regressive case + self.check_to_4d(mask_cache, q_len=7, kv_len=7) + + # same with extra attention masks + self.check_to_4d(mask_cache, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_cache, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + + def test_causal_mask(self): + mask_cache = AttentionMaskCache(is_causal=True) + + # auto-regressive use case + self.check_to_causal(mask_cache, q_len=1, kv_len=7) + # special auto-regressive case + self.check_to_causal(mask_cache, q_len=3, kv_len=7) + # non auto-regressive case + 
self.check_to_causal(mask_cache, q_len=7, kv_len=7) + + def test_causal_mask_sliding(self): + mask_cache = AttentionMaskCache(is_causal=True, sliding_window=3) + + # auto-regressive use case + self.check_to_causal(mask_cache, q_len=1, kv_len=7) + # special auto-regressive case + self.check_to_causal(mask_cache, q_len=3, kv_len=7) + # non auto-regressive case + self.check_to_causal(mask_cache, q_len=7, kv_len=7) + + def test_has_mask(self): + mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + mask_cache = AttentionMaskCache(False) + + assert not mask_cache.has_mask(mask_2d) + hash_key = mask_cache._hash_tensor(mask_2d) + assert hash_key in mask_cache.cache_has_mask + + mask_2d[1, 1] = 0 + assert mask_cache.has_mask(mask_2d) + hash_key = mask_cache._hash_tensor(mask_2d) + assert hash_key in mask_cache.cache_has_mask + + def test_in_place_tensor_is_cached(self): + mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + mask_cache = AttentionMaskCache(False) + + _ = mask_cache.to_4d(mask_2d, 3, 7) + hash_key = mask_cache._hash_tensor(mask_2d, (3, 7)) + assert hash_key in mask_cache.cache_4d_mask + + # make sure in-place change is noticed + mask_2d[0, 3] = 4 + hash_key = mask_cache._hash_tensor(mask_2d, (3, 7)) + assert hash_key not in mask_cache.cache_4d_mask class LlamaModelTester: def __init__( From 431b3a83e09a743ade6317b2e39ba1a1b547f3ee Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 18:49:53 +0000 Subject: [PATCH 19/24] improve more --- .../models/mistral/modeling_mistral.py | 65 +------------------ 1 file changed, 2 insertions(+), 63 deletions(-) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index d3eae5113eee..5e877fe31df3 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -234,33 +234,6 @@ def _get_unpad_data(attention_mask): ) -def _make_sliding_window_causal_mask( - input_ids_shape: torch.Size, - dtype: torch.dtype, - device: torch.device, - past_key_values_length: int = 0, - sliding_window: int = 4096, -): - """ - Make causal mask used for sliding window attention - """ - bsz, tgt_len = input_ids_shape - - tensor = torch.full( - (tgt_len, tgt_len), - fill_value=1, - device=device, - ) - mask = torch.tril(tensor, diagonal=0) - # make the mask banded to account for sliding window - mask = torch.triu(mask, diagonal=-sliding_window) - mask = torch.log(mask).to(dtype) - - if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) - - # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ @@ -676,7 +649,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=True, + causal=self.attention_mask_cache.is_causal, ) else: attn_output_unpad = flash_attn_varlen_func( @@ -951,7 +924,7 @@ def __init__(self, config: MistralConfig): # create attention mask cache that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - attention_mask_cache = AttentionMaskCache(is_causal=True) + attention_mask_cache = AttentionMaskCache(is_causal=True, sliding_window=config.sliding_window) self.embed_tokens = 
nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( @@ -969,32 +942,6 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.embed_tokens = value - def _prepare_decoder_attention_mask( - self, attention_mask, input_shape, inputs_embeds, past_key_values_length, sliding_window - ): - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - combined_attention_mask = None - if input_shape[-1] > 1: - combined_attention_mask = _make_sliding_window_causal_mask( - input_shape, - inputs_embeds.dtype, - device=inputs_embeds.device, - past_key_values_length=past_key_values_length, - sliding_window=sliding_window, - ) - - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( - inputs_embeds.device - ) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask - ) - - return combined_attention_mask - @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) def forward( self, @@ -1058,14 +1005,6 @@ def forward( " call `tokenizer.padding_side = 'left'` before tokenizing the input. " ) - attention_mask = self._prepare_decoder_attention_mask( - attention_mask, - (batch_size, seq_length), - inputs_embeds, - past_key_values_length, - sliding_window=self.config.sliding_window, - ) - hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: From f8b2e4e7a6ed798d886d9cf2ca12e1daa9efa9b5 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 20:50:49 +0200 Subject: [PATCH 20/24] Update src/transformers/models/owlv2/modeling_owlv2.py --- src/transformers/models/owlv2/modeling_owlv2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index 97e4ddb4af23..451cc4a69126 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1378,7 +1378,6 @@ def normalize_grid_corner_coordinates(self, feature_map: torch.FloatTensor): def objectness_predictor(self, image_features: torch.FloatTensor) -> torch.FloatTensor: """Predicts the probability that each image feature token is an object. - Args: image_features (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_dim)`)): Features extracted from the image. 
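
As of PATCH 19/24 above, Mistral no longer builds its banded mask in `_prepare_decoder_attention_mask`; the shared `AttentionMaskCache(is_causal=True, sliding_window=config.sliding_window)` handles both the padding expansion and the sliding-window causal bias. A small usage sketch of that object as it exists at this point in the series (it is renamed to `AttnMaskConverter` two commits later); the import assumes this branch is installed, and the toy sizes and padding pattern are purely illustrative:

import torch
from transformers.models.mistral.modeling_mistral import AttentionMaskCache

cache = AttentionMaskCache(is_causal=True, sliding_window=5)

# prefill: 2d padding mask (batch_size, key_value_length) -> 4d additive bias
attention_mask_2d = torch.ones(2, 7, dtype=torch.long)
attention_mask_2d[0, :2] = 0  # left padding on the first sequence
mask_4d = cache.to_4d(attention_mask_2d, query_length=7, key_value_length=7, dtype=torch.float32)
print(mask_4d.shape)  # torch.Size([2, 1, 7, 7])

# single decoding step with no padding: purely causal + sliding-window 4d mask
causal_4d = cache.to_causal_4d(batch_size=2, query_length=1, key_value_length=8, dtype=torch.float32)
print(causal_4d.shape)  # torch.Size([2, 1, 1, 8])

Because the same cache instance is handed to every decoder layer, the 4d mask is built once per forward pass and reused, which is the memory argument made in the class docstring earlier in the series.
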
From 0338ffeabbd00c44389395c294c3c0a3192f070d Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 19:19:01 +0000 Subject: [PATCH 21/24] make style --- .../models/falcon/modeling_falcon.py | 22 +++++++++++++++---- .../models/llama/modeling_llama.py | 22 +++++++++++++++---- .../models/mistral/modeling_mistral.py | 22 +++++++++++++++---- tests/models/llama/test_modeling_llama.py | 16 +++++++------- 4 files changed, 62 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 077ca5e4b32b..197a320b7586 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -134,7 +134,12 @@ def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" + self, + batch_size: int, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, + device: Union[torch.device, "str"] = "cpu", ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -173,7 +178,11 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 + self, + attention_mask_2d: torch.Tensor, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -218,7 +227,12 @@ def to_4d( return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None + self, + input_ids_shape: torch.Size, + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0, + sliding_window: Optional[int] = None, ): """ Make causal mask used for bi-directional self-attention. 
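
This commit only reformats signatures, but it is worth spelling out why the cache keys built by `_hash_tensor` (still present at this point; dropped again when the cache itself is removed in the rename commit below) are safe against in-place edits: PyTorch bumps a tensor's version counter on every in-place operation, so a mutated 2d mask no longer maps to its stale cached 4d mask. A tiny sketch of that mechanism, assuming nothing beyond plain PyTorch (the `_version` attribute is an internal counter, used here the same way the cache key does):

import torch

mask_2d = torch.ones(3, 7, dtype=torch.long)
key = (id(mask_2d), mask_2d._version, mask_2d.shape)

mask_2d[0, 3] = 0  # in-place update increments the version counter
new_key = (id(mask_2d), mask_2d._version, mask_2d.shape)

assert key != new_key  # the stale cache entry can no longer be hit

This is exactly the behaviour exercised by `test_in_place_tensor_is_cached` in the Llama tests above.
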
@@ -237,7 +251,7 @@ def _make_causal_mask( if sliding_window is not None: diagonal = past_key_values_length - sliding_window + 1 - context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal) mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 43d8a673b59d..0053a7114e17 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -109,7 +109,12 @@ def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" + self, + batch_size: int, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, + device: Union[torch.device, "str"] = "cpu", ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -148,7 +153,11 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 + self, + attention_mask_2d: torch.Tensor, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -193,7 +202,12 @@ def to_4d( return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None + self, + input_ids_shape: torch.Size, + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0, + sliding_window: Optional[int] = None, ): """ Make causal mask used for bi-directional self-attention. 
@@ -212,7 +226,7 @@ def _make_causal_mask( if sliding_window is not None: diagonal = past_key_values_length - sliding_window + 1 - context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal) mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 5e877fe31df3..3a6a642806ee 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -99,7 +99,12 @@ def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: return self.cache_has_mask[mask_2d_hash] def to_causal_4d( - self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32, device: Union[torch.device, "str"] = "cpu" + self, + batch_size: int, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, + device: Union[torch.device, "str"] = "cpu", ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative @@ -138,7 +143,11 @@ def to_causal_4d( return self.cache_4d_mask_only_causal[expected_shape] def to_4d( - self, attention_mask_2d: torch.Tensor, query_length: int, key_value_length: int, dtype: torch.dtype = torch.float32 + self, + attention_mask_2d: torch.Tensor, + query_length: int, + key_value_length: int, + dtype: torch.dtype = torch.float32, ) -> torch.Tensor: """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, @@ -183,7 +192,12 @@ def to_4d( return self.cache_4d_mask[mask_2d_hash] def _make_causal_mask( - self, input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, sliding_window: Optional[int] = None + self, + input_ids_shape: torch.Size, + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0, + sliding_window: Optional[int] = None, ): """ Make causal mask used for bi-directional self-attention. 
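
The `compute_num_context_mask` helper in the Llama mask tests (reformatted in the test hunk at the end of this patch) gives a closed form for how many extra positions the sliding window masks on top of the plain causal triangle. A short sketch checking that closed form against a brute-force count, using q_len/kv_len/window combinations similar to the tests; the `brute_force` helper is illustrative only:

def compute_num_context_mask(kv_len, context, q_len):
    # closed form from the tests: extra entries masked by the sliding window
    c_mask_len = kv_len - context
    num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2
    cut_mask_len = max(c_mask_len - q_len, 0)
    num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2
    return num_mask_triangle - num_cut_mask

def brute_force(kv_len, context, q_len):
    # count key positions that are inside the causal range but outside the window
    past = kv_len - q_len
    count = 0
    for i in range(q_len):       # query at absolute position past + i
        for j in range(kv_len):  # key at absolute position j
            if j <= past + i and (past + i) - j >= context:
                count += 1
    return count

for q_len, kv_len, context in [(1, 7, 3), (3, 7, 5), (7, 7, 3)]:
    assert compute_num_context_mask(kv_len, context, q_len) == brute_force(kv_len, context, q_len)

Together with the `q_len * (q_len - 1) // 2` term for the causal triangle itself, this is the expected count the tests compare against `(mask_4d != 0).sum()`.
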
@@ -202,7 +216,7 @@ def _make_causal_mask( if sliding_window is not None: diagonal = past_key_values_length - sliding_window + 1 - context_mask = (1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal)) + context_mask = 1 - torch.triu(torch.ones_like(mask, dtype=torch.int), diagonal=diagonal) mask.masked_fill_(context_mask.bool(), torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index e23d3a7edea6..258e729baf2d 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -42,13 +42,13 @@ from transformers.models.llama.modeling_llama import AttentionMaskCache +@require_torch class AttentionMaskTester(unittest.TestCase): - def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) mask_4d_values = mask_4d[:, 0][mask_indices] - is_inf = (mask_4d_values == -float("inf")) - is_min = (mask_4d_values == torch.finfo(mask_4d.dtype).min) + is_inf = mask_4d_values == -float("inf") + is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() def check_to_4d(self, mask_cache, q_len, kv_len, additional_mask=None, bsz=3): @@ -127,12 +127,11 @@ def compute_num_context_mask(self, kv_len, context, q_len): # This function computes the # of attention tokens that are added for # the sliding window c_mask_len = kv_len - context - num_mask_triangle = (c_mask_len * (c_mask_len + 1) // 2) + num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2 cut_mask_len = max(c_mask_len - q_len, 0) - num_cut_mask = (cut_mask_len * (cut_mask_len + 1) // 2) + num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2 return num_mask_triangle - num_cut_mask - def test_2d_to_4d_causal(self): mask_cache = AttentionMaskCache(is_causal=True) @@ -149,7 +148,7 @@ def test_2d_to_4d_causal(self): self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d(self): - mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + torch.ones((3, 7), device=torch_device, dtype=torch.long) mask_cache = AttentionMaskCache(is_causal=False) # non auto-regressive case @@ -159,7 +158,7 @@ def test_2d_to_4d(self): self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): - mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) + torch.ones((3, 7), device=torch_device, dtype=torch.long) mask_cache = AttentionMaskCache(is_causal=True, sliding_window=5) # auto-regressive use case @@ -220,6 +219,7 @@ def test_in_place_tensor_is_cached(self): hash_key = mask_cache._hash_tensor(mask_2d, (3, 7)) assert hash_key not in mask_cache.cache_4d_mask + class LlamaModelTester: def __init__( self, From ae3eb2e72aa0c0e8fad8df66e0b4199168c98619 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 19:40:40 +0000 Subject: [PATCH 22/24] make style --- .../models/falcon/modeling_falcon.py | 23 +++++++++++++++++++ .../models/mistral/modeling_mistral.py | 21 ++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 197a320b7586..468aa2b18ac8 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ 
b/src/transformers/models/falcon/modeling_falcon.py @@ -15,6 +15,7 @@ """PyTorch Falcon model.""" import math +import warnings from typing import Optional, Tuple, Union import torch @@ -614,7 +615,13 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, + **kwargs, ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] @@ -751,7 +758,16 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, + **kwargs, ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") + fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads # 3 x [batch_size, seq_length, num_heads, head_dim] @@ -973,7 +989,13 @@ def forward( head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, + **kwargs, ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + residual = hidden_states if self.config.new_decoder_architecture: @@ -992,6 +1014,7 @@ def forward( head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, + **kwargs, ) attention_output = attn_outputs[0] diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 3a6a642806ee..18e29e320d39 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -20,6 +20,7 @@ """ PyTorch Mistral model.""" import inspect import math +import warnings from typing import List, Optional, Tuple, Union import torch @@ -407,7 +408,12 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, + **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) @@ -496,7 +502,15 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, + **kwargs, ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + + # overwrite attention_mask with padding_mask + attention_mask = kwargs.pop("padding_mask") bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) @@ -763,12 +777,17 @@ def forward( past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, + **kwargs, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, sequence_length)` where padding elements are indicated by 0. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. From b438dc84883f1ffe589152663781fed60a2cde46 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 19 Oct 2023 23:23:11 +0000 Subject: [PATCH 23/24] rename to converter --- .../jax-projects/big_bird/bigbird_flax.py | 2 +- .../jax-projects/big_bird/train.py | 2 +- .../vqgan-clip/VQGAN_CLIP.py | 2 +- .../models/falcon/modeling_falcon.py | 206 ++++++------------ .../models/llama/modeling_llama.py | 175 ++++++--------- .../models/mistral/modeling_mistral.py | 173 ++++++--------- tests/models/llama/test_modeling_llama.py | 114 ++++------ 7 files changed, 241 insertions(+), 433 deletions(-) diff --git a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py index af5e11c83a6a..c171b88800ed 100644 --- a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py +++ b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py @@ -9,13 +9,13 @@ import jax.numpy as jnp import joblib import optax -import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm +import wandb from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule diff --git a/examples/research_projects/jax-projects/big_bird/train.py b/examples/research_projects/jax-projects/big_bird/train.py index ce37b7f975bb..3840918d16ae 100644 --- a/examples/research_projects/jax-projects/big_bird/train.py +++ b/examples/research_projects/jax-projects/big_bird/train.py @@ -2,11 +2,11 @@ from dataclasses import replace import jax -import wandb from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step from datasets import load_dataset from flax import jax_utils +import wandb from transformers import BigBirdTokenizerFast diff --git a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py b/examples/research_projects/vqgan-clip/VQGAN_CLIP.py index 1bfbc4cd5c36..2a39955e347f 100644 --- a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py +++ b/examples/research_projects/vqgan-clip/VQGAN_CLIP.py @@ -4,12 +4,12 @@ import imageio import torch import torchvision -import 
wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn +import wandb from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index 468aa2b18ac8..6eaeed419977 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -89,16 +89,14 @@ def _get_unpad_data(attention_mask): ) -# Copied from transformers.models.llama.modeling_llama.AttentionMaskCache -class AttentionMaskCache: +# Copied from transformers.models.llama.modeling_llama.AttnMaskConverter +class AttnMaskConverter: """ A utility attention mask class that allows: - - Create a causal mask 4d mask - - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + - Create a causal 4d mask + - Create a causal 4d mask with slided window + - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores - - Check whether 2D attention mask has any padding tokens or not - - To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. Parameters: is_causal (`bool`): @@ -112,28 +110,6 @@ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window - self.cache_4d_mask = {} - self.cache_4d_mask_only_causal = {} - self.cache_has_mask = {} - - def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): - # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key - # object to be certain to not accidentally return an incorrect hashed key (e.g. if the tensor has been updated in-place, only the version is increased) - return (id(tensor), tensor._version, tensor.shape + shape) - - def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: - """ - Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. - """ - mask_2d_hash = self._hash_tensor(attention_mask_2d) - if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: - self.cache_has_mask = {} - - if mask_2d_hash not in self.cache_has_mask: - self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - - return self.cache_has_mask[mask_2d_hash] - def to_causal_4d( self, batch_size: int, @@ -144,39 +120,29 @@ def to_causal_4d( ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative - bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new - memory will be allocated. + bias to upper right hand triangular matrix (causal mask). 
""" - expected_shape = (batch_size, 1, query_length, key_value_length) - if not self.is_causal: raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") - # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache - if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: - self.cache_4d_mask_only_causal = {} - # If shape is not cached, create a new causal mask and cache it - if expected_shape not in self.cache_4d_mask_only_causal: - input_shape = (batch_size, query_length) - past_key_values_length = key_value_length - query_length + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if input_shape[-1] > 1 or self.sliding_window is not None: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) - - self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 or self.sliding_window is not None: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, + ) - return self.cache_4d_mask_only_causal[expected_shape] + return causal_4d_mask def to_4d( self, @@ -188,44 +154,33 @@ def to_4d( """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is - causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be - allocated. + causal, a causal mask will be added. 
""" - mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) - - # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: - self.cache_4d_mask = {} + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length - # If attention_mask is not cached, create a new one and cache it - if mask_2d_hash not in self.cache_4d_mask: - input_shape = (attention_mask_2d.shape[0], query_length) + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length - - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=attention_mask_2d.device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) - elif self.sliding_window is not None: - raise NotImplementedError("Sliding window is currently only implemented for causal masking") - - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( - attention_mask_2d.device + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) - cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") - self.cache_4d_mask[mask_2d_hash] = cached_4d_mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - return self.cache_4d_mask[mask_2d_hash] + return expanded_4d_mask def _make_causal_mask( self, @@ -485,7 +440,7 @@ def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: class FalconAttention(nn.Module): - def __init__(self, config: FalconConfig, attention_mask_cache=None): + def __init__(self, config: FalconConfig): super().__init__() self.config = config @@ -494,7 +449,7 @@ def __init__(self, config: FalconConfig, attention_mask_cache=None): self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout - self.attention_mask_cache = attention_mask_cache + self.is_causal = True if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( @@ -654,17 +609,6 @@ def forward( else: present = None - if attention_mask is not None: - # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_4d( - attention_mask, query_length, kv_length, query_layer.dtype - ) - elif self.attention_mask_cache.is_causal: - # create 4d causal mask. 
Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_causal_4d( - batch_size, query_length, kv_length, query_layer.dtype, query_layer.device - ) - query_layer_ = query_layer.reshape(batch_size, self.num_heads, -1, self.head_dim) key_layer_ = key_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim) value_layer_ = value_layer.reshape(batch_size, num_kv_heads, -1, self.head_dim) @@ -868,7 +812,7 @@ def _flash_attention_forward( The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ # Contains at least one padding token in the sequence - if attention_mask is not None and self.attention_mask_cache.has_mask(attention_mask): + if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length @@ -887,7 +831,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.attention_mask_cache.is_causal, + causal=self.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -955,15 +899,15 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class FalconDecoderLayer(nn.Module): - def __init__(self, config: FalconConfig, attention_mask_cache=None): + def __init__(self, config: FalconConfig, attn_mask_converter=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = ( - FalconAttention(config, attention_mask_cache=attention_mask_cache) + FalconAttention(config) if not getattr(config, "_flash_attn_2_enabled", False) - else FalconFlashAttention2(config, attention_mask_cache=attention_mask_cache) + else FalconFlashAttention2(config) ) self.mlp = FalconMLP(config) self.hidden_dropout = config.hidden_dropout @@ -1211,12 +1155,10 @@ def __init__(self, config: FalconConfig): # create attention mask cache that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - attention_mask_cache = AttentionMaskCache(is_causal=True) + self.attn_mask_converter = AttnMaskConverter(is_causal=True) # Transformer blocks - self.h = nn.ModuleList( - [FalconDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] - ) + self.h = nn.ModuleList([FalconDecoderLayer(config) for _ in range(config.num_hidden_layers)]) # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) @@ -1229,37 +1171,6 @@ def __init__(self, config: FalconConfig): def get_input_embeddings(self): return self.word_embeddings - @staticmethod - def _prepare_attn_mask( - attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int - ) -> torch.BoolTensor: - # Create a causal mask - # The attention mask we receive as input should cover the whole extended sequence, including any past - # cache, so its shape should be [batch_size, seq_length + past_key_values_length] - # The output shape will be [batch_size, 1, seq_length, seq_length + past_key_values_length] - if input_shape[1] + past_key_values_length != attention_mask.shape[1]: - raise ValueError( - "Attention mask shape should be (batch_size, seq_length + past_key_values_length)" - f" but is {attention_mask.shape} with input_ids shape {input_shape} and past length" - f" {past_key_values_length}." 
- ) - combined_attention_mask = None - device = attention_mask.device - _, seq_length = input_shape - - if seq_length > 1: - combined_attention_mask = _make_causal_mask( - input_shape, device=device, past_key_values_length=past_key_values_length - ) - - # [batch_size, seq_length + past_key_values_length] -> [batch_size, 1, seq_length, seq_length + past_key_values_length] - expanded_attn_mask = _expand_mask(attention_mask, past_key_values_length=past_key_values_length) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask - ) - - return combined_attention_mask - def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @@ -1347,6 +1258,21 @@ def forward( ) position_ids = position_ids.unsqueeze(0) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + key_value_length = seq_length + past_key_values_length + # 4d mask is passed through the layers + if attention_mask is not None: + attention_mask = self.attn_mask_converter.to_4d( + attention_mask, seq_length, key_value_length, dtype=inputs_embeds.dtype + ) + else: + attention_mask = self.attn_mask_converter.to_causal_4d( + batch_size, seq_length, key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device + ) + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 0053a7114e17..541455d86afd 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -64,15 +64,13 @@ def _get_unpad_data(attention_mask): ) -class AttentionMaskCache: +class AttnMaskConverter: """ A utility attention mask class that allows: - - Create a causal mask 4d mask - - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + - Create a causal 4d mask + - Create a causal 4d mask with slided window + - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores - - Check whether 2D attention mask has any padding tokens or not - - To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. Parameters: is_causal (`bool`): @@ -86,28 +84,6 @@ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window - self.cache_4d_mask = {} - self.cache_4d_mask_only_causal = {} - self.cache_has_mask = {} - - def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): - # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key - # object to be certain to not accidentally return an incorrect hashed key (e.g. if the tensor has been updated in-place, only the version is increased) - return (id(tensor), tensor._version, tensor.shape + shape) - - def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: - """ - Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. 
- """ - mask_2d_hash = self._hash_tensor(attention_mask_2d) - if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: - self.cache_has_mask = {} - - if mask_2d_hash not in self.cache_has_mask: - self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - - return self.cache_has_mask[mask_2d_hash] - def to_causal_4d( self, batch_size: int, @@ -118,39 +94,29 @@ def to_causal_4d( ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative - bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new - memory will be allocated. + bias to upper right hand triangular matrix (causal mask). """ - expected_shape = (batch_size, 1, query_length, key_value_length) - if not self.is_causal: raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") - # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache - if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: - self.cache_4d_mask_only_causal = {} - # If shape is not cached, create a new causal mask and cache it - if expected_shape not in self.cache_4d_mask_only_causal: - input_shape = (batch_size, query_length) - past_key_values_length = key_value_length - query_length - - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if input_shape[-1] > 1 or self.sliding_window is not None: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length - self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 or self.sliding_window is not None: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, + ) - return self.cache_4d_mask_only_causal[expected_shape] + return causal_4d_mask def to_4d( self, @@ -162,44 +128,33 @@ def to_4d( """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is - causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be - allocated. + causal, a causal mask will be added. 
""" - mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) - - # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: - self.cache_4d_mask = {} + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length - # If attention_mask is not cached, create a new one and cache it - if mask_2d_hash not in self.cache_4d_mask: - input_shape = (attention_mask_2d.shape[0], query_length) + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length - - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=attention_mask_2d.device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) - elif self.sliding_window is not None: - raise NotImplementedError("Sliding window is currently only implemented for causal masking") - - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( - attention_mask_2d.device + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) - cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") - self.cache_4d_mask[mask_2d_hash] = cached_4d_mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - return self.cache_4d_mask[mask_2d_hash] + return expanded_4d_mask def _make_causal_mask( self, @@ -411,7 +366,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: class LlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" - def __init__(self, config: LlamaConfig, attention_mask_cache=None): + def __init__(self, config: LlamaConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -421,7 +376,7 @@ def __init__(self, config: LlamaConfig, attention_mask_cache=None): self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta - self.attention_mask_cache = attention_mask_cache + self.is_causal = True if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -531,15 +486,6 @@ def forward( f" {attn_weights.size()}" ) - if attention_mask is not None: - # convert 2d -> 4d. 
Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_4d(attention_mask, q_len, kv_seq_len, attn_weights.dtype) - elif self.attention_mask_cache.is_causal: - # create 4d causal mask. Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_causal_4d( - bsz, q_len, kv_seq_len, attn_weights.dtype, attn_weights.device - ) - if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( @@ -698,7 +644,7 @@ def _flash_attention_forward( The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) """ # Contains at least one padding token in the sequence - if attention_mask is not None and self.attention_mask_cache.has_mask(attention_mask): + if attention_mask is not None: batch_size = query_states.shape[0] query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( query_states, key_states, value_states, attention_mask, query_length @@ -717,7 +663,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.attention_mask_cache.is_causal, + causal=self.is_causal, ) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) @@ -768,13 +714,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query class LlamaDecoderLayer(nn.Module): - def __init__(self, config: LlamaConfig, attention_mask_cache=None): + def __init__(self, config: LlamaConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - LlamaAttention(config=config, attention_mask_cache=attention_mask_cache) + LlamaAttention(config=config) if not getattr(config, "_flash_attn_2_enabled", False) - else LlamaFlashAttention2(config=config, attention_mask_cache=attention_mask_cache) + else LlamaFlashAttention2(config=config) ) self.mlp = LlamaMLP(config) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -969,12 +915,10 @@ def __init__(self, config: LlamaConfig): # create attention mask cache that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - attention_mask_cache = AttentionMaskCache(is_causal=True) + self.attn_mask_converter = AttnMaskConverter(is_causal=True) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList( - [LlamaDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] - ) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False @@ -1012,9 +956,9 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - _, seq_length = input_ids.shape[:2] + batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: - _, seq_length = inputs_embeds.shape[:2] + batch_size, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") @@ -1032,6 +976,21 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + 
else: + key_value_length = seq_length + past_key_values_length + # 4d mask is passed through the layers + if attention_mask is not None: + attention_mask = self.attn_mask_converter.to_4d( + attention_mask, seq_length, key_value_length, dtype=inputs_embeds.dtype + ) + else: + attention_mask = self.attn_mask_converter.to_causal_4d( + batch_size, seq_length, key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device + ) + # embed positions hidden_states = inputs_embeds diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 18e29e320d39..0ee86e5a58f3 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -54,16 +54,14 @@ _CONFIG_FOR_DOC = "MistralConfig" -# Copied from transformers.models.llama.modeling_llama.AttentionMaskCache -class AttentionMaskCache: +# Copied from transformers.models.llama.modeling_llama.AttnMaskConverter +class AttnMaskConverter: """ A utility attention mask class that allows: - - Create a causal mask 4d mask - - Convert a 2D attention mask (batch_size, query_length) to a 4D attention mask (batch_size, 1, query_length, + - Create a causal 4d mask + - Create a causal 4d mask with slided window + - Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length, key_value_length) that can be multiplied with attention scores - - Check whether 2D attention mask has any padding tokens or not - - To avoid unnecessary memory allocation, attention masks are cached and can be easily reused. Parameters: is_causal (`bool`): @@ -77,28 +75,6 @@ def __init__(self, is_causal: bool, sliding_window: Optional[int] = None): self.is_causal = is_causal self.sliding_window = sliding_window - self.cache_4d_mask = {} - self.cache_4d_mask_only_causal = {} - self.cache_has_mask = {} - - def _hash_tensor(self, tensor: torch.Tensor, shape: Tuple[int] = ()): - # we need to use both the unique id, memory address, the _version, and shape of the tensor as a key - # object to be certain to not accidentally return an incorrect hashed key (e.g. if the tensor has been updated in-place, only the version is increased) - return (id(tensor), tensor._version, tensor.shape + shape) - - def has_mask(self, attention_mask_2d: torch.Tensor) -> bool: - """ - Checks whether the attention_mask actually has a padding token or whether it has only non-padding tokens. - """ - mask_2d_hash = self._hash_tensor(attention_mask_2d) - if mask_2d_hash not in self.cache_has_mask and len(self.cache_has_mask) > 0: - self.cache_has_mask = {} - - if mask_2d_hash not in self.cache_has_mask: - self.cache_has_mask[mask_2d_hash] = 0 in attention_mask_2d - - return self.cache_has_mask[mask_2d_hash] - def to_causal_4d( self, batch_size: int, @@ -109,39 +85,29 @@ def to_causal_4d( ) -> torch.Tensor: """ Creates a causal 4D mask of (bsz, head_dim=1, query_length, key_value_length) shape and adds large negative - bias to upper right hand triangular matrix (causal mask). If cached 4D attention mask can be reused, no new - memory will be allocated. + bias to upper right hand triangular matrix (causal mask). 
""" - expected_shape = (batch_size, 1, query_length, key_value_length) - if not self.is_causal: raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.") - # If the attention_mask shape does not match, but there is still a tensor in the cache, empty the cache - if expected_shape not in self.cache_4d_mask_only_causal and len(self.cache_4d_mask_only_causal) > 0: - self.cache_4d_mask_only_causal = {} - # If shape is not cached, create a new causal mask and cache it - if expected_shape not in self.cache_4d_mask_only_causal: - input_shape = (batch_size, query_length) - past_key_values_length = key_value_length - query_length - - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if input_shape[-1] > 1 or self.sliding_window is not None: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) + input_shape = (batch_size, query_length) + past_key_values_length = key_value_length - query_length - self.cache_4d_mask_only_causal[expected_shape] = causal_4d_mask + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if input_shape[-1] > 1 or self.sliding_window is not None: + past_key_values_length = key_value_length - query_length + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, + ) - return self.cache_4d_mask_only_causal[expected_shape] + return causal_4d_mask def to_4d( self, @@ -153,44 +119,33 @@ def to_4d( """ Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length, key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is - causal, a causal mask will be added. If cached 4D attention mask can be reused, no new memory will be - allocated. + causal, a causal mask will be added. 
""" - mask_2d_hash = self._hash_tensor(attention_mask_2d, (query_length, key_value_length)) - - # If the attention_mask does not match, but there is still a tensor in the cache, empty the cache - if mask_2d_hash not in self.cache_4d_mask and len(self.cache_4d_mask) > 0: - self.cache_4d_mask = {} + input_shape = (attention_mask_2d.shape[0], query_length) + past_key_values_length = key_value_length - query_length - # If attention_mask is not cached, create a new one and cache it - if mask_2d_hash not in self.cache_4d_mask: - input_shape = (attention_mask_2d.shape[0], query_length) + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + causal_4d_mask = None + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: past_key_values_length = key_value_length - query_length - - # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - causal_4d_mask = None - if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: - past_key_values_length = key_value_length - query_length - causal_4d_mask = self._make_causal_mask( - input_shape, - dtype, - device=attention_mask_2d.device, - past_key_values_length=past_key_values_length, - sliding_window=self.sliding_window, - ) - elif self.sliding_window is not None: - raise NotImplementedError("Sliding window is currently only implemented for causal masking") - - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( - attention_mask_2d.device + causal_4d_mask = self._make_causal_mask( + input_shape, + dtype, + device=attention_mask_2d.device, + past_key_values_length=past_key_values_length, + sliding_window=self.sliding_window, ) - cached_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask + elif self.sliding_window is not None: + raise NotImplementedError("Sliding window is currently only implemented for causal masking") - self.cache_4d_mask[mask_2d_hash] = cached_4d_mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to( + attention_mask_2d.device + ) + expanded_4d_mask = expanded_attn_mask if causal_4d_mask is None else expanded_attn_mask + causal_4d_mask - return self.cache_4d_mask[mask_2d_hash] + return expanded_4d_mask def _make_causal_mask( self, @@ -369,7 +324,7 @@ class MistralAttention(nn.Module): and "Generating Long Sequences with Sparse Transformers". """ - def __init__(self, config: MistralConfig, attention_mask_cache=None): + def __init__(self, config: MistralConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -379,7 +334,7 @@ def __init__(self, config: MistralConfig, attention_mask_cache=None): self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta - self.attention_mask_cache = attention_mask_cache + self.is_causal = True if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( @@ -449,15 +404,6 @@ def forward( f" {attn_weights.size()}" ) - if attention_mask is not None: - # convert 2d -> 4d. Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_4d(attention_mask, q_len, kv_seq_len, attn_weights.dtype) - elif self.attention_mask_cache.is_causal: - # create 4d causal mask. 
Re-use cached mask if available - attention_mask = self.attention_mask_cache.to_causal_4d( - bsz, q_len, kv_seq_len, attn_weights.dtype, attn_weights.device - ) - if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( @@ -677,7 +623,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.attention_mask_cache.is_causal, + causal=self.is_causal, ) else: attn_output_unpad = flash_attn_varlen_func( @@ -690,7 +636,7 @@ def _flash_attention_forward( max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, - causal=self.attention_mask_cache.is_causal, + causal=self.is_causal, window_size=(self.config.sliding_window, self.config.sliding_window), ) @@ -757,13 +703,13 @@ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query class MistralDecoderLayer(nn.Module): - def __init__(self, config: MistralConfig, attention_mask_cache=None): + def __init__(self, config: MistralConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ( - MistralAttention(config=config, attention_mask_cache=attention_mask_cache) + MistralAttention(config=config) if not getattr(config, "_flash_attn_2_enabled", False) - else MistralFlashAttention2(config, attention_mask_cache=attention_mask_cache) + else MistralFlashAttention2(config) ) self.mlp = MistralMLP(config) self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -957,12 +903,10 @@ def __init__(self, config: MistralConfig): # create attention mask cache that trickles down to each attention layer # so that the attention_mask cache can be shared among layers - attention_mask_cache = AttentionMaskCache(is_causal=True, sliding_window=config.sliding_window) + self.attn_mask_converter = AttnMaskConverter(is_causal=True, sliding_window=config.sliding_window) self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList( - [MistralDecoderLayer(config, attention_mask_cache) for _ in range(config.num_hidden_layers)] - ) + self.layers = nn.ModuleList([MistralDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False @@ -1038,6 +982,21 @@ def forward( " call `tokenizer.padding_side = 'left'` before tokenizing the input. 
" ) + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + key_value_length = seq_length + past_key_values_length + # 4d mask is passed through the layers + if attention_mask is not None: + attention_mask = self.attn_mask_converter.to_4d( + attention_mask, seq_length, key_value_length, dtype=inputs_embeds.dtype + ) + else: + attention_mask = self.attn_mask_converter.to_causal_4d( + batch_size, seq_length, key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device + ) + hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index 258e729baf2d..df41c6c5f520 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -39,7 +39,7 @@ LlamaModel, LlamaTokenizer, ) - from transformers.models.llama.modeling_llama import AttentionMaskCache + from transformers.models.llama.modeling_llama import AttnMaskConverter @require_torch @@ -51,24 +51,19 @@ def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() - def check_to_4d(self, mask_cache, q_len, kv_len, additional_mask=None, bsz=3): + def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3): mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) if additional_mask is not None: for bsz_idx, seq_idx in additional_mask: mask_2d[bsz_idx, seq_idx] = 0 - mask_4d = mask_cache.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len) - - # check cache - hash_key = mask_cache._hash_tensor(mask_2d, (q_len, kv_len)) - assert hash_key in mask_cache.cache_4d_mask - assert mask_cache.cache_4d_mask[hash_key] is mask_4d + mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len) assert mask_4d.shape == (bsz, 1, q_len, kv_len) - context = mask_cache.sliding_window - if mask_cache.is_causal and context is None: + context = mask_converter.sliding_window + if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) @@ -78,12 +73,12 @@ def check_to_4d(self, mask_cache, q_len, kv_len, additional_mask=None, bsz=3): # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) - elif not mask_cache.is_causal and context is None: + elif not mask_converter.is_causal and context is None: if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == 0 if 0 in mask_2d: self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) - elif mask_cache.is_causal and context is not None: + elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked @@ -95,28 +90,23 @@ def check_to_4d(self, mask_cache, q_len, kv_len, additional_mask=None, bsz=3): assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) - def check_to_causal(self, mask_cache, q_len, kv_len, bsz=3): - mask_4d = mask_cache.to_causal_4d(bsz, 
query_length=q_len, key_value_length=kv_len, device=torch_device) + def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3): + mask_4d = mask_converter.to_causal_4d(bsz, query_length=q_len, key_value_length=kv_len, device=torch_device) - if q_len == 1 and mask_cache.sliding_window is None: + if q_len == 1 and mask_converter.sliding_window is None: # no causal mask if q_len is 1 assert mask_4d is None return - # check cache - mask_2d_shape = (bsz, 1, q_len, kv_len) - assert mask_2d_shape in mask_cache.cache_4d_mask_only_causal - assert mask_cache.cache_4d_mask_only_causal[mask_2d_shape] is mask_4d - - context = mask_cache.sliding_window - if mask_cache.is_causal and context is None: + context = mask_converter.sliding_window + if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked - elif not mask_cache.is_causal and context is None: + elif not mask_converter.is_causal and context is None: assert (mask_4d != 0).sum().cpu().item() == 0 - elif mask_cache.is_causal and context is not None: + elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked @@ -133,91 +123,65 @@ def compute_num_context_mask(self, kv_len, context, q_len): return num_mask_triangle - num_cut_mask def test_2d_to_4d_causal(self): - mask_cache = AttentionMaskCache(is_causal=True) + mask_converter = AttnMaskConverter(is_causal=True) # auto-regressive use case - self.check_to_4d(mask_cache, q_len=1, kv_len=7) + self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case - self.check_to_4d(mask_cache, q_len=3, kv_len=7) + self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case - self.check_to_4d(mask_cache, q_len=7, kv_len=7) + self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks - self.check_to_4d(mask_cache, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) - self.check_to_4d(mask_cache, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) - self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d(self): torch.ones((3, 7), device=torch_device, dtype=torch.long) - mask_cache = AttentionMaskCache(is_causal=False) + mask_converter = AttnMaskConverter(is_causal=False) # non auto-regressive case - self.check_to_4d(mask_cache, q_len=7, kv_len=7) + self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks - self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): torch.ones((3, 7), device=torch_device, dtype=torch.long) - mask_cache = AttentionMaskCache(is_causal=True, sliding_window=5) + mask_converter = AttnMaskConverter(is_causal=True, sliding_window=5) # auto-regressive use case - self.check_to_4d(mask_cache, q_len=1, kv_len=7) + 
self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case - self.check_to_4d(mask_cache, q_len=3, kv_len=7) + self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case - self.check_to_4d(mask_cache, q_len=7, kv_len=7) + self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks - self.check_to_4d(mask_cache, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) - self.check_to_4d(mask_cache, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) - self.check_to_4d(mask_cache, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) + self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_causal_mask(self): - mask_cache = AttentionMaskCache(is_causal=True) + mask_converter = AttnMaskConverter(is_causal=True) # auto-regressive use case - self.check_to_causal(mask_cache, q_len=1, kv_len=7) + self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case - self.check_to_causal(mask_cache, q_len=3, kv_len=7) + self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case - self.check_to_causal(mask_cache, q_len=7, kv_len=7) + self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_causal_mask_sliding(self): - mask_cache = AttentionMaskCache(is_causal=True, sliding_window=3) + mask_converter = AttnMaskConverter(is_causal=True, sliding_window=3) # auto-regressive use case - self.check_to_causal(mask_cache, q_len=1, kv_len=7) + self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case - self.check_to_causal(mask_cache, q_len=3, kv_len=7) + self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case - self.check_to_causal(mask_cache, q_len=7, kv_len=7) - - def test_has_mask(self): - mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) - mask_cache = AttentionMaskCache(False) - - assert not mask_cache.has_mask(mask_2d) - hash_key = mask_cache._hash_tensor(mask_2d) - assert hash_key in mask_cache.cache_has_mask - - mask_2d[1, 1] = 0 - assert mask_cache.has_mask(mask_2d) - hash_key = mask_cache._hash_tensor(mask_2d) - assert hash_key in mask_cache.cache_has_mask - - def test_in_place_tensor_is_cached(self): - mask_2d = torch.ones((3, 7), device=torch_device, dtype=torch.long) - mask_cache = AttentionMaskCache(False) - - _ = mask_cache.to_4d(mask_2d, 3, 7) - hash_key = mask_cache._hash_tensor(mask_2d, (3, 7)) - assert hash_key in mask_cache.cache_4d_mask - - # make sure in-place change is noticed - mask_2d[0, 3] = 4 - hash_key = mask_cache._hash_tensor(mask_2d, (3, 7)) - assert hash_key not in mask_cache.cache_4d_mask + self.check_to_causal(mask_converter, q_len=7, kv_len=7) class LlamaModelTester: From 53399888435e8732057392bad8d79febbca13051 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 20 Oct 2023 01:32:30 +0200 Subject: [PATCH 24/24] Apply suggestions from code review --- .../research_projects/jax-projects/big_bird/bigbird_flax.py | 2 +- examples/research_projects/jax-projects/big_bird/train.py | 2 +- examples/research_projects/vqgan-clip/VQGAN_CLIP.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py 
b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py index c171b88800ed..af5e11c83a6a 100644 --- a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py +++ b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py @@ -9,13 +9,13 @@ import jax.numpy as jnp import joblib import optax +import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm -import wandb from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule diff --git a/examples/research_projects/jax-projects/big_bird/train.py b/examples/research_projects/jax-projects/big_bird/train.py index 3840918d16ae..ce37b7f975bb 100644 --- a/examples/research_projects/jax-projects/big_bird/train.py +++ b/examples/research_projects/jax-projects/big_bird/train.py @@ -2,11 +2,11 @@ from dataclasses import replace import jax +import wandb from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step from datasets import load_dataset from flax import jax_utils -import wandb from transformers import BigBirdTokenizerFast diff --git a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py b/examples/research_projects/vqgan-clip/VQGAN_CLIP.py index 2a39955e347f..1bfbc4cd5c36 100644 --- a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py +++ b/examples/research_projects/vqgan-clip/VQGAN_CLIP.py @@ -4,12 +4,12 @@ import imageio import torch import torchvision +import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn -import wandb from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil
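
The patches above replace the cached `AttentionMaskCache` with a stateless `AttnMaskConverter` that the model calls once in `forward` and then threads a ready-made 4d mask through the decoder layers. As a reading aid, here is a minimal, self-contained sketch of that conversion pattern in plain PyTorch. The class and helper names (`SimpleMaskConverter` and its methods) are illustrative stand-ins, not the exact implementation in the diff; only the shapes and the additive-bias convention are meant to match.

from typing import Optional

import torch


class SimpleMaskConverter:
    """Illustrative stand-in for the AttnMaskConverter pattern in the patches above."""

    def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
        self.is_causal = is_causal
        self.sliding_window = sliding_window

    def to_causal_4d(self, batch_size, query_length, key_value_length,
                     dtype=torch.float32, device="cpu"):
        # Build an additive bias of shape (bsz, 1, q_len, kv_len):
        # 0 where a query may attend, dtype-min where it may not.
        if not self.is_causal:
            raise ValueError("to_causal_4d only makes sense when is_causal=True")
        if query_length == 1 and self.sliding_window is None:
            return None  # single-token decoding needs no causal bias
        past_length = key_value_length - query_length
        q_idx = torch.arange(query_length, device=device).view(-1, 1)
        k_idx = torch.arange(key_value_length, device=device).view(1, -1)
        allowed = k_idx <= q_idx + past_length  # causal: never attend to the future
        if self.sliding_window is not None:
            # keep only the most recent `sliding_window` keys (including the current one)
            allowed &= k_idx > q_idx + past_length - self.sliding_window
        bias = torch.full((query_length, key_value_length), torch.finfo(dtype).min,
                          dtype=dtype, device=device)
        bias = bias.masked_fill(allowed, 0.0)
        return bias.view(1, 1, query_length, key_value_length).expand(
            batch_size, 1, query_length, key_value_length
        )

    def to_4d(self, attention_mask_2d, query_length, key_value_length, dtype=torch.float32):
        # Expand a (bsz, kv_len) padding mask (1 = keep, 0 = pad) to
        # (bsz, 1, q_len, kv_len) and, for causal converters, add the causal bias.
        batch_size = attention_mask_2d.shape[0]
        min_value = torch.finfo(dtype).min
        expanded = attention_mask_2d[:, None, None, :].to(dtype).expand(
            batch_size, 1, query_length, key_value_length
        )
        padding_bias = (1.0 - expanded) * min_value
        if self.is_causal:
            causal = self.to_causal_4d(batch_size, query_length, key_value_length,
                                       dtype, attention_mask_2d.device)
            if causal is not None:
                # adding two dtype-min biases may overflow to -inf; the tests above
                # accept either -inf or dtype-min for masked positions
                padding_bias = padding_bias + causal
        return padding_bias


if __name__ == "__main__":
    converter = SimpleMaskConverter(is_causal=True)
    pad_mask = torch.ones(2, 7, dtype=torch.long)
    pad_mask[1, :2] = 0  # second sequence starts with two padding tokens
    mask_4d = converter.to_4d(pad_mask, query_length=7, key_value_length=7)
    print(mask_4d.shape)  # torch.Size([2, 1, 7, 7])

Because the converter is stateless, sharing one instance on the model (rather than passing a cache into every attention layer) is enough; the bias is computed once per forward pass and simply added to the attention scores.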
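
For the FlashAttention-2 branch, the model `forward` above keeps the raw 2d mask only when it actually contains a padding token and otherwise passes `None`, so the attention layer can call the dense kernel with `causal=self.is_causal` and skip the unpad/pad round trip. A small sketch of that gate follows; the function name is hypothetical, and the `flash_attn` kernels themselves are left out because they require a CUDA build.

import torch


def select_flash_attention_mask(attention_mask_2d):
    # Mirror of the `0 in attention_mask` check in the forward pass above:
    # keep the 2d mask only if some position is padded, otherwise return None
    # so the purely causal dense kernel can be used.
    if attention_mask_2d is not None and (attention_mask_2d == 0).any():
        return attention_mask_2d
    return None


full = torch.ones(2, 7, dtype=torch.long)
padded = full.clone()
padded[0, :3] = 0

print(select_flash_attention_mask(full))          # None -> dense causal kernel
print(select_flash_attention_mask(padded).shape)  # torch.Size([2, 7]) -> varlen path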
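
The new tests validate the converted masks by counting non-zero entries. The comment in the tests writes the triangular count as k * (k+1) / 2, while the code uses q_len * (q_len - 1) // 2; the latter, the strict upper triangle (the diagonal is never masked, since every token attends to itself), is the identity that actually holds, and it is independent of how many cached key/value positions precede the queries. A short standalone check, assuming only plain PyTorch:

import torch


def count_causally_masked(q_len: int, kv_len: int) -> int:
    # Number of (query, key) pairs a causal mask hides for one sequence,
    # with kv_len - q_len already-cached positions in front of the queries.
    past = kv_len - q_len
    q_idx = torch.arange(q_len).view(-1, 1)
    k_idx = torch.arange(kv_len).view(1, -1)
    return int((k_idx > q_idx + past).sum())


for q_len, kv_len in [(1, 7), (3, 7), (7, 7)]:
    assert count_causally_masked(q_len, kv_len) == q_len * (q_len - 1) // 2

print("a causal mask hides q_len * (q_len - 1) / 2 positions per sequence")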