Generation tests: update imagegpt input name, remove unused functions (
gante authored and BernardZach committed Dec 5, 2024
1 parent 5b1a672 commit f02923a
Showing 18 changed files with 23 additions and 656 deletions.
9 changes: 3 additions & 6 deletions src/transformers/models/imagegpt/modeling_imagegpt.py
@@ -690,8 +690,7 @@ def forward(

if "pixel_values" in kwargs:
warnings.warn(
"The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
" instead.",
"The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
FutureWarning,
)

@@ -1004,8 +1003,7 @@ def forward(

if "pixel_values" in kwargs:
warnings.warn(
"The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
" instead.",
"The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
FutureWarning,
)

@@ -1137,8 +1135,7 @@ def forward(

if "pixel_values" in kwargs:
warnings.warn(
"The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
" instead.",
"The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
FutureWarning,
)

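For reference, the call-site change that the updated warning asks for looks roughly like the sketch below. This is a minimal illustration, not part of the commit; the checkpoint name and the dummy cluster ids are assumptions.

import torch
from transformers import ImageGPTForCausalImageModeling

# Illustrative checkpoint; any ImageGPT checkpoint would do here.
model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")

# ImageGPT consumes sequences of colour-cluster indices rather than raw pixels.
dummy_ids = torch.randint(0, model.config.vocab_size - 1, (1, 64))

# Deprecated: routes through **kwargs and triggers the FutureWarning updated above
# (removal planned for v4.47).
# outputs = model(pixel_values=dummy_ids)

# Preferred: pass the indices as `input_ids`.
outputs = model(input_ids=dummy_ids)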
20 changes: 0 additions & 20 deletions tests/generation/test_utils.py
@@ -190,26 +190,6 @@ def _get_constrained_beam_kwargs(self, num_return_sequences=1):
         }
         return beam_kwargs

-    @staticmethod
-    def _get_encoder_outputs(
-        model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
-    ):
-        encoder = model.get_encoder()
-        encoder_outputs = encoder(
-            input_ids,
-            attention_mask=attention_mask,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-        )
-        encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
-            num_interleave, dim=0
-        )
-        generation_config = copy.deepcopy(model.generation_config)
-        model._prepare_special_tokens(generation_config)
-        input_ids = torch.zeros_like(input_ids[:, :1]) + generation_config.decoder_start_token_id
-        attention_mask = None
-        return encoder_outputs, input_ids, attention_mask

     def _greedy_generate(
         self,
         model,
31 changes: 1 addition & 30 deletions tests/models/codegen/test_modeling_codegen.py
@@ -23,7 +23,7 @@

 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin


@@ -150,35 +150,6 @@ def get_config(self):
             rotary_dim=self.rotary_dim,
         )

-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )

     def create_and_check_codegen_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = CodeGenModel(config=config)
         model.to(torch_device)
19 changes: 0 additions & 19 deletions tests/models/falcon_mamba/test_modeling_falcon_mamba.py
@@ -150,25 +150,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config

-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            attention_mask,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        return (
-            config,
-            input_ids,
-            attention_mask,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        )

     def create_and_check_falcon_mamba_model(self, config, input_ids, *args):
         config.output_hidden_states = True
         model = FalconMambaModel(config=config)
31 changes: 1 addition & 30 deletions tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
@@ -22,7 +22,7 @@

 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin


@@ -178,35 +178,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config

-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )

     def create_and_check_gpt_bigcode_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTBigCodeModel(config=config)
         model.to(torch_device)
31 changes: 1 addition & 30 deletions tests/models/gpt_neo/test_modeling_gpt_neo.py
@@ -22,7 +22,7 @@

 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin


@@ -157,35 +157,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config

-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )

     def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTNeoModel(config=config)
         model.to(torch_device)
31 changes: 1 addition & 30 deletions tests/models/gptj/test_modeling_gptj.py
@@ -32,7 +32,7 @@

 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin


@@ -173,35 +173,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config

-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )

     def create_and_check_gptj_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTJModel(config=config)
         model.to(torch_device)
