diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py
index 0aa296e7055f..d457b99b32b2 100755
--- a/src/transformers/utils/fx.py
+++ b/src/transformers/utils/fx.py
@@ -997,11 +997,23 @@ def _generate_dummy_input(
             )
         elif "inputs_embeds" in input_name:
             batch_size = shape[0]
 
-            sequence_length = shape[-1]
-            inputs_dict[input_name] = torch.zeros(
-                batch_size, sequence_length, model.config.hidden_size, dtype=torch.float, device=device
-            )
+            if (
+                getattr(model.config, "embedding_size", None) is not None
+                and model.config.model_type != "megatron-bert"
+            ):
+                embedding_size = model.config.embedding_size
+            else:
+                embedding_size = model.config.hidden_size
+
+            if len(shape) == 3:
+                # (batch_size, num_choices, sequence_length, embedding_size)
+                embedding_shape = (batch_size, shape[1], shape[2], embedding_size)
+            else:
+                # (batch_size, sequence_length, embedding_size)
+                embedding_shape = (batch_size, shape[1], embedding_size)
+
+            inputs_dict[input_name] = torch.zeros(embedding_shape, dtype=torch.float, device=device)
         elif "visual_feats" in input_name:
             inputs_dict[input_name] = torch.zeros(
                 shape
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 299d99280b33..e7ce653f3e80 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -1215,14 +1215,33 @@ def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=Fa
                     (past_mask, inputs_to_test[1]["attention_mask"]), dim=1
                 )
 
-            if "inputs_embeds" in inspect.signature(model.forward).parameters and not model.config.is_encoder_decoder:
-                inputs_to_test.append(
-                    {
-                        "inputs_embeds": torch.rand(
-                            2, 2, model.config.hidden_size, dtype=torch.float, device=torch_device
-                        )
-                    }
-                )
+            forward_parameters = inspect.signature(model.forward).parameters
+            if "input_ids" in forward_parameters and "inputs_embeds" in forward_parameters:
+                inps = copy.deepcopy(inputs_to_test[0])
+
+                embedding_size = (
+                    model.config.embedding_size
+                    if getattr(model.config, "embedding_size", None) is not None
+                    and model.config.model_type != "megatron-bert"
+                    else model.config.hidden_size
+                )
+
+                if (
+                    model.config.model_type in MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
+                    and model.__class__.__name__
+                    == MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES[model.config.model_type]
+                ):
+                    batch_size, num_choices, sequence_length = inputs["input_ids"].shape
+                    shape = (batch_size, num_choices, sequence_length, embedding_size)
+                elif inps["input_ids"].ndim == 2:
+                    batch_size, sequence_length = inputs["input_ids"].shape
+                    shape = (batch_size, sequence_length, embedding_size)
+                else:
+                    self.skipTest("Unknown case")
+
+                del inps["input_ids"]
+                inps["inputs_embeds"] = torch.rand(shape, dtype=torch.float, device=torch_device)
+                inputs_to_test.append(inps)
 
             for inps in inputs_to_test:
                 filtered_inputs = {k: v for (k, v) in inps.items() if k in input_names}
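
For readers skimming the patch, the following standalone sketch reproduces the shape logic that the new "inputs_embeds" branch of _generate_dummy_input applies. It is illustrative only and assumes torch and transformers are installed: dummy_inputs_embeds is a hypothetical helper written for this example, not part of the patch, and AlbertConfig is used merely as one config whose embedding_size differs from its hidden_size.

# Illustrative sketch (not part of the patch): the shape logic used by the new
# "inputs_embeds" branch of _generate_dummy_input, reproduced as a standalone
# helper. `dummy_inputs_embeds` is a hypothetical name chosen for this example.
import torch
from transformers import AlbertConfig


def dummy_inputs_embeds(config, shape, device="cpu"):
    # Prefer config.embedding_size when the architecture defines one,
    # except for megatron-bert, which the patch special-cases.
    if getattr(config, "embedding_size", None) is not None and config.model_type != "megatron-bert":
        embedding_size = config.embedding_size
    else:
        embedding_size = config.hidden_size

    batch_size = shape[0]
    if len(shape) == 3:
        # multiple-choice style input_ids: (batch_size, num_choices, sequence_length)
        embedding_shape = (batch_size, shape[1], shape[2], embedding_size)
    else:
        # standard input_ids: (batch_size, sequence_length)
        embedding_shape = (batch_size, shape[1], embedding_size)

    return torch.zeros(embedding_shape, dtype=torch.float, device=device)


# An explicit config where embedding_size differs from hidden_size, which is
# exactly the case the old hidden_size-only dummy input got wrong.
config = AlbertConfig(embedding_size=128, hidden_size=768)
print(dummy_inputs_embeds(config, [2, 16]).shape)     # torch.Size([2, 16, 128])
print(dummy_inputs_embeds(config, [2, 4, 16]).shape)  # torch.Size([2, 4, 16, 128])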