Avoid using unnecessary get_values(MODEL_MAPPING) (#29362)
* more fixes

* more fixes

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
ydshieh authored Feb 29, 2024
1 parent b647acd commit 44fe1a1
Showing 14 changed files with 94 additions and 85 deletions.
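Every file in this diff applies the same refactor: instead of importing the lazy auto-mapping objects (MODEL_MAPPING, MODEL_FOR_BACKBONE_MAPPING, and so on) and testing `model_class in get_values(...)`, the tests now compare `model_class.__name__` against the class-name strings in the corresponding `*_MAPPING_NAMES` dictionaries from `transformers.models.auto.modeling_auto`. Iterating a lazy mapping's values forces every referenced model module to be imported; the name dictionaries are plain strings and cost nothing. A minimal sketch of the before/after pattern (the `should_skip` helper is hypothetical, not code from this commit):

    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES


    def should_skip(model_class) -> bool:
        # Before (removed): get_values(MODEL_MAPPING) materializes the lazy
        # mapping, importing every base model class just to build a skip list.
        #
        #   from transformers import MODEL_MAPPING
        #   from transformers.models.auto import get_values
        #   return model_class in get_values(MODEL_MAPPING)
        #
        # After: MODEL_MAPPING_NAMES maps model type -> class-name string,
        # so the membership check triggers no model imports at all.
        return model_class.__name__ in MODEL_MAPPING_NAMES.values()

Name comparison is looser than identity comparison, but class names in these mappings are unique in practice, and it keeps collecting a test file from dragging in unrelated architectures.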
20 changes: 11 additions & 9 deletions tests/models/beit/test_modeling_beit.py
@@ -21,7 +21,6 @@
 from packaging import version
 
 from transformers import BeitConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
 
@@ -36,14 +35,13 @@
     from torch import nn
 
     from transformers import (
-        MODEL_FOR_BACKBONE_MAPPING,
-        MODEL_MAPPING,
         BeitBackbone,
         BeitForImageClassification,
         BeitForMaskedImageModeling,
         BeitForSemanticSegmentation,
         BeitModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
     from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -312,10 +310,10 @@ def test_training(self):
 
         for model_class in self.all_model_classes:
             # we don't test BeitForMaskedImageModeling
-            if model_class in [
-                *get_values(MODEL_MAPPING),
-                *get_values(MODEL_FOR_BACKBONE_MAPPING),
-                BeitForMaskedImageModeling,
+            if model_class.__name__ in [
+                *MODEL_MAPPING_NAMES.values(),
+                *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(),
+                "BeitForMaskedImageModeling",
             ]:
                 continue
 
@@ -337,8 +335,12 @@ def test_training_gradient_checkpointing(self):
         for model_class in self.all_model_classes:
             # we don't test BeitForMaskedImageModeling
             if (
-                model_class
-                in [*get_values(MODEL_MAPPING), *get_values(MODEL_FOR_BACKBONE_MAPPING), BeitForMaskedImageModeling]
+                model_class.__name__
+                in [
+                    *MODEL_MAPPING_NAMES.values(),
+                    *MODEL_FOR_BACKBONE_MAPPING_NAMES.values(),
+                    "BeitForMaskedImageModeling",
+                ]
                 or not model_class.supports_gradient_checkpointing
             ):
                 continue
6 changes: 3 additions & 3 deletions tests/models/clipseg/test_modeling_clipseg.py
@@ -24,8 +24,7 @@
 import requests
 
 import transformers
-from transformers import MODEL_MAPPING, CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
-from transformers.models.auto import get_values
+from transformers import CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig
 from transformers.testing_utils import (
     is_flax_available,
     is_pt_flax_cross_test,
@@ -52,6 +51,7 @@
     from torch import nn
 
     from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.clipseg.modeling_clipseg import CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -751,7 +751,7 @@ def test_training(self):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             print("Model class:", model_class)
7 changes: 3 additions & 4 deletions tests/models/data2vec/test_modeling_data2vec_vision.py
@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import Data2VecVisionConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
 
@@ -32,11 +31,11 @@
     from torch import nn
 
     from transformers import (
-        MODEL_MAPPING,
         Data2VecVisionForImageClassification,
         Data2VecVisionForSemanticSegmentation,
         Data2VecVisionModel,
     )
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.data2vec.modeling_data2vec_vision import DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -235,7 +234,7 @@ def test_training(self):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in [*get_values(MODEL_MAPPING)]:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -254,7 +253,7 @@ def test_training_gradient_checkpointing(self):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in [*get_values(MODEL_MAPPING)] or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             # TODO: remove the following 3 lines once we have a MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
             # this can then be incorporated into _prepare_for_class in test_modeling_common.py
19 changes: 10 additions & 9 deletions tests/models/deit/test_modeling_deit.py
@@ -19,7 +19,6 @@
 import warnings
 
 from transformers import DeiTConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import (
     require_accelerate,
     require_torch,
@@ -41,14 +40,16 @@
     from torch import nn
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         DeiTForImageClassification,
         DeiTForImageClassificationWithTeacher,
         DeiTForMaskedImageModeling,
         DeiTModel,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -269,7 +270,7 @@ def test_training(self):
         for model_class in self.all_model_classes:
             # DeiTForImageClassificationWithTeacher supports inference-only
             if (
-                model_class in get_values(MODEL_MAPPING)
+                model_class.__name__ in MODEL_MAPPING_NAMES.values()
                 or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
             ):
                 continue
@@ -289,7 +290,7 @@ def test_training_gradient_checkpointing(self):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             # DeiTForImageClassificationWithTeacher supports inference-only
             if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
@@ -325,10 +326,10 @@ def test_problem_types(self):
 
         for model_class in self.all_model_classes:
             if (
-                model_class
+                model_class.__name__
                 not in [
-                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
-                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+                    *MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
+                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                 ]
                 or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
             ):
8 changes: 4 additions & 4 deletions tests/models/dpt/test_modeling_dpt.py
@@ -19,7 +19,6 @@
 
 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -31,7 +30,8 @@
     import torch
     from torch import nn
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -214,7 +214,7 @@ def test_training(self):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -233,7 +233,7 @@ def test_training_gradient_checkpointing(self):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             model = model_class(config)
             model.to(torch_device)
8 changes: 4 additions & 4 deletions tests/models/dpt/test_modeling_dpt_auto_backbone.py
@@ -19,7 +19,6 @@
 
 from transformers import Dinov2Config, DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -30,7 +29,8 @@
 if is_torch_available():
     import torch
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation
+    from transformers import DPTForDepthEstimation
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -166,7 +166,7 @@ def test_training(self):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -185,7 +185,7 @@ def test_training_gradient_checkpointing(self):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             model = model_class(config)
             model.to(torch_device)
8 changes: 4 additions & 4 deletions tests/models/dpt/test_modeling_dpt_hybrid.py
@@ -19,7 +19,6 @@
 
 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -31,7 +30,8 @@
     import torch
     from torch import nn
 
-    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -229,7 +229,7 @@ def test_training(self):
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
 
             model = model_class(config)
@@ -248,7 +248,7 @@ def test_training_gradient_checkpointing(self):
             config.use_cache = False
             config.return_dict = True
 
-            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
                 continue
             model = model_class(config)
             model.to(torch_device)
13 changes: 7 additions & 6 deletions tests/models/efficientformer/test_modeling_efficientformer.py
@@ -20,7 +20,6 @@
 from typing import List
 
 from transformers import EfficientFormerConfig
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
 
@@ -33,12 +32,14 @@
     import torch
 
     from transformers import (
-        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
-        MODEL_MAPPING,
         EfficientFormerForImageClassification,
         EfficientFormerForImageClassificationWithTeacher,
         EfficientFormerModel,
     )
+    from transformers.models.auto.modeling_auto import (
+        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
+        MODEL_MAPPING_NAMES,
+    )
     from transformers.models.efficientformer.modeling_efficientformer import (
         EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
@@ -308,7 +309,7 @@ def test_training(self):
         for model_class in self.all_model_classes:
             # EfficientFormerForImageClassificationWithTeacher supports inference-only
             if (
-                model_class in get_values(MODEL_MAPPING)
+                model_class.__name__ in MODEL_MAPPING_NAMES.values()
                 or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher"
             ):
                 continue
@@ -330,9 +331,9 @@ def test_problem_types(self):
 
         for model_class in self.all_model_classes:
             if (
-                model_class
+                model_class.__name__
                 not in [
-                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
+                    *MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
                 ]
                 or model_class.__name__ == "EfficientFormerForImageClassificationWithTeacher"
             ):
6 changes: 3 additions & 3 deletions tests/models/glpn/test_modeling_glpn.py
@@ -18,7 +18,6 @@
 import unittest
 
 from transformers import is_torch_available, is_vision_available
-from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 
 from ...test_configuration_common import ConfigTester
@@ -29,7 +28,8 @@
 if is_torch_available():
     import torch
 
-    from transformers import MODEL_MAPPING, GLPNConfig, GLPNForDepthEstimation, GLPNModel
+    from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNModel
+    from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
     from transformers.models.glpn.modeling_glpn import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST
 
 
@@ -291,7 +291,7 @@ def test_training(self):
         config.return_dict = True
 
         for model_class in self.all_model_classes:
-            if model_class in get_values(MODEL_MAPPING):
+            if model_class.__name__ in MODEL_MAPPING_NAMES.values():
                 continue
             # TODO: remove the following 3 lines once we have a MODEL_FOR_DEPTH_ESTIMATION_MAPPING
             # this can then be incorporated into _prepare_for_class in test_modeling_common.py
(Diffs for the remaining 5 changed files were not loaded.)
