
Commit 9a451e2: tests fixup
sambhavnoobcoder committed Jan 27, 2025
1 parent 1039978 commit 9a451e2
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions tests/models/paligemma/test_modeling_paligemma.py
@@ -354,7 +354,7 @@ def test_generate_compile_model_forward(self):
def test_attention_mask_with_token_types(self):
"""Test that attention masking works correctly both with and without token type IDs."""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
@@ -365,9 +365,9 @@ def test_attention_mask_with_token_types(self):
**inputs_dict,
output_attentions=True,
)
-# Case 2: Without token_type_ids
-inputs_no_types = {k: v for k, v in inputs_dict.items() if k != 'token_type_ids'}

+# Case 2: Without token_type_ids
+inputs_no_types = {k: v for k, v in inputs_dict.items() if k != "token_type_ids"}
outputs_no_types = model(
**inputs_no_types,
output_attentions=True,
@@ -389,7 +389,7 @@ def test_attention_mask_with_token_types(self):
# Verify attention weights for pad tokens are zero
self.assertTrue(
torch.all(layer_attn[batch_idx, :, :, seq_idx] == 0),
f"Found non-zero attention weights for padding token at batch {batch_idx}, sequence position {seq_idx}"
f"Found non-zero attention weights for padding token at batch {batch_idx}, sequence position {seq_idx}",
)


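As background for the padded-attention assertion in the last hunk, here is a minimal, self-contained sketch (not part of this commit; the tensor shapes and values are made up for illustration) of why attention weights at masked key positions are expected to be exactly zero once the attention mask is applied before the softmax:

import torch

# Toy attention scores of shape (batch, heads, query_len, key_len); the last key position is padding.
scores = torch.randn(1, 2, 4, 4)
attention_mask = torch.tensor([1, 1, 1, 0])

# Masked key positions receive -inf before the softmax, so their attention weights collapse to exactly 0.
scores = scores.masked_fill(attention_mask == 0, float("-inf"))
weights = scores.softmax(dim=-1)

# Mirrors the test's torch.all(layer_attn[batch_idx, :, :, seq_idx] == 0) check for padding positions.
assert torch.all(weights[..., attention_mask == 0] == 0)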