From 9a451e2f340cab845f66c270b9d29fe3bf2e3cfa Mon Sep 17 00:00:00 2001
From: sambhavnoobcoder
Date: Mon, 27 Jan 2025 21:41:33 +0530
Subject: [PATCH] tests fixup

---
 tests/models/paligemma/test_modeling_paligemma.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/models/paligemma/test_modeling_paligemma.py b/tests/models/paligemma/test_modeling_paligemma.py
index 6e7324093d5e..cab278a1dc8e 100644
--- a/tests/models/paligemma/test_modeling_paligemma.py
+++ b/tests/models/paligemma/test_modeling_paligemma.py
@@ -354,7 +354,7 @@ def test_generate_compile_model_forward(self):
     def test_attention_mask_with_token_types(self):
         """Test that attention masking works correctly both with and without token type IDs."""
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
+
         for model_class in self.all_model_classes:
             model = model_class(config)
             model.to(torch_device)
@@ -365,9 +365,9 @@ def test_attention_mask_with_token_types(self):
                 **inputs_dict,
                 output_attentions=True,
             )
-
-            # Case 2: Without token_type_ids
-            inputs_no_types = {k: v for k, v in inputs_dict.items() if k != 'token_type_ids'}
+
+            # Case 2: Without token_type_ids
+            inputs_no_types = {k: v for k, v in inputs_dict.items() if k != "token_type_ids"}
             outputs_no_types = model(
                 **inputs_no_types,
                 output_attentions=True,
@@ -389,7 +389,7 @@ def test_attention_mask_with_token_types(self):
                         # Verify attention weights for pad tokens are zero
                         self.assertTrue(
                             torch.all(layer_attn[batch_idx, :, :, seq_idx] == 0),
-                            f"Found non-zero attention weights for padding token at batch {batch_idx}, sequence position {seq_idx}"
+                            f"Found non-zero attention weights for padding token at batch {batch_idx}, sequence position {seq_idx}",
                         )
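
A minimal standalone sketch (not part of the patch) of the invariant the test's final
assertion checks: a key position masked out as padding should receive exactly zero
attention weight after softmax. The tensor shapes and mask below are illustrative
assumptions, not values taken from the test suite.

import torch

# (batch, heads, query, key) attention scores; key position 3 plays the padding role
scores = torch.randn(1, 2, 4, 4)
pad_mask = torch.tensor([False, False, False, True])  # True marks the padded key
attn = torch.softmax(scores.masked_fill(pad_mask, float("-inf")), dim=-1)
# exp(-inf) == 0.0 exactly, so the padded key gets zero weight from every query,
# which is what the test's torch.all(layer_attn[batch_idx, :, :, seq_idx] == 0) asserts
assert torch.all(attn[..., 3] == 0)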