fix: persist embedding type of BartConditionalGeneration after resize
AbdiHaryadi committed Jul 31, 2024
1 parent 35df5ed commit dc0dbc8
Showing 2 changed files with 14 additions and 1 deletion.
src/transformers/models/bart/modeling_bart.py (3 changes: 2 additions & 1 deletion)

@@ -1431,7 +1431,8 @@ def __init__(self, config: BartConfig):
         super().__init__(config)

         padding_idx, vocab_size = config.pad_token_id, config.vocab_size
-        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+        embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+        self.shared = BartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)

         self.encoder = BartEncoder(config, self.shared)
         self.decoder = BartDecoder(config, self.shared)
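Why the embedding type matters: with config.scale_embedding set, BART multiplies token embedding lookups by sqrt(d_model), and BartScaledWordEmbedding bakes that factor into the module itself. A minimal sketch of the idea (the exact upstream definition may differ in details):

import math

import torch
import torch.nn as nn


class ScaledWordEmbedding(nn.Embedding):
    """nn.Embedding whose lookups are multiplied by a fixed scale.

    Sketch of the idea behind BartScaledWordEmbedding; illustrative only.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: float = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        # Same lookup as nn.Embedding, followed by the fixed scale factor.
        return super().forward(input_ids) * self.embed_scale

If resize_token_embeddings replaced this module with a plain nn.Embedding, the lookup would still work, but the embed_scale multiplier would silently disappear. Constructing self.shared as BartScaledWordEmbedding up front is what lets the type (and the scale) survive a resize.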
tests/models/bart/test_modeling_bart.py (12 changes: 12 additions & 0 deletions)

@@ -518,6 +518,18 @@ def test_generate_fp16(self):
     def test_load_save_without_tied_weights(self):
         pass

+    def test_resize_embeddings_persists_embeddings_type(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
+
+        config.scale_embedding = True
+        model = BartForConditionalGeneration(config)
+        old_type = type(model.model.decoder.embed_tokens)
+
+        model.resize_token_embeddings(new_num_tokens=config.vocab_size)
+
+        new_type = type(model.model.decoder.embed_tokens)
+        self.assertIs(old_type, new_type)
+

 def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
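To see the behavior the new test pins down, one can build a small model, resize its vocabulary, and check the embedding class before and after. A rough reproduction sketch; the tiny BartConfig values here are arbitrary and chosen only to keep the model small:

from transformers import BartConfig, BartForConditionalGeneration

# Arbitrary small config for illustration; scale_embedding=True is what
# makes the shared embedding a BartScaledWordEmbedding in the first place.
config = BartConfig(
    vocab_size=128,
    d_model=16,
    encoder_layers=1,
    decoder_layers=1,
    encoder_attention_heads=2,
    decoder_attention_heads=2,
    encoder_ffn_dim=32,
    decoder_ffn_dim=32,
    scale_embedding=True,
)
model = BartForConditionalGeneration(config)

before = type(model.model.decoder.embed_tokens).__name__
model.resize_token_embeddings(new_num_tokens=config.vocab_size + 8)
after = type(model.model.decoder.embed_tokens).__name__

# Before this commit the resized table could come back as a plain
# nn.Embedding, dropping the sqrt(d_model) factor; with the fix the
# scaled-embedding type survives the resize.
print(before, after)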
