From 5962d6f7cb4536b7db1c2032c4df2b4cb65458ef Mon Sep 17 00:00:00 2001 From: Lysandre Date: Sat, 8 Jun 2024 11:35:57 +0200 Subject: [PATCH 01/11] Import structure & first three model refactors --- Makefile | 2 - src/transformers/__init__.py | 68 +---- src/transformers/models/albert/__init__.py | 164 +---------- .../models/albert/configuration_albert.py | 6 + .../models/albert/modeling_albert.py | 23 ++ .../models/albert/modeling_flax_albert.py | 20 ++ .../models/albert/modeling_tf_albert.py | 23 ++ .../models/albert/tokenization_albert.py | 5 + .../models/albert/tokenization_albert_fast.py | 5 + src/transformers/models/align/__init__.py | 55 +--- .../models/align/configuration_align.py | 8 + .../models/align/modeling_align.py | 8 + .../models/align/processing_align.py | 6 +- src/transformers/models/altclip/__init__.py | 53 +--- .../models/altclip/configuration_altclip.py | 7 + .../models/altclip/modeling_altclip.py | 8 + .../models/altclip/processing_altclip.py | 5 + src/transformers/utils/import_utils.py | 270 +++++++++++++++++- .../import_structure_raw_register.py | 61 ++++ ...import_structure_register_with_comments.py | 69 +++++ ...port_structure_register_with_duplicates.py | 67 +++++ tests/utils/test_import_structure.py | 94 ++++++ 22 files changed, 701 insertions(+), 326 deletions(-) create mode 100644 tests/utils/import_structures/import_structure_raw_register.py create mode 100644 tests/utils/import_structures/import_structure_register_with_comments.py create mode 100644 tests/utils/import_structures/import_structure_register_with_duplicates.py create mode 100644 tests/utils/test_import_structure.py diff --git a/Makefile b/Makefile index cfa40b7bd6ee..d3998327cc71 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,6 @@ quality: @python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 
🚨'; exit 1) ruff check $(check_dirs) setup.py conftest.py ruff format --check $(check_dirs) setup.py conftest.py - python utils/custom_init_isort.py --check_only python utils/sort_auto_mappings.py --check_only python utils/check_doc_toc.py python utils/check_docstrings.py --check_all @@ -62,7 +61,6 @@ quality: # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: - python utils/custom_init_isort.py python utils/sort_auto_mappings.py python utils/check_doc_toc.py --fix_and_overwrite diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 4f4b17ac84f1..1039afcdaf33 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -1499,7 +1499,6 @@ "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", - "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", @@ -1523,7 +1522,6 @@ "BigBirdForQuestionAnswering", "BigBirdForSequenceClassification", "BigBirdForTokenClassification", - "BigBirdLayer", "BigBirdModel", "BigBirdPreTrainedModel", "load_tf_weights_in_big_bird", @@ -1642,7 +1640,6 @@ "CanineForQuestionAnswering", "CanineForSequenceClassification", "CanineForTokenClassification", - "CanineLayer", "CanineModel", "CaninePreTrainedModel", "load_tf_weights_in_canine", @@ -1729,7 +1726,6 @@ "ConvBertForQuestionAnswering", "ConvBertForSequenceClassification", "ConvBertForTokenClassification", - "ConvBertLayer", "ConvBertModel", "ConvBertPreTrainedModel", "load_tf_weights_in_convbert", @@ -1958,7 +1954,6 @@ "QDQBertForQuestionAnswering", "QDQBertForSequenceClassification", "QDQBertForTokenClassification", - "QDQBertLayer", "QDQBertLMHeadModel", "QDQBertModel", "QDQBertPreTrainedModel", @@ -2210,7 +2205,6 @@ "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", - "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] @@ -2311,7 +2305,6 @@ "GPTNeoXForQuestionAnswering", "GPTNeoXForSequenceClassification", "GPTNeoXForTokenClassification", - "GPTNeoXLayer", "GPTNeoXModel", "GPTNeoXPreTrainedModel", ] @@ -2319,7 +2312,6 @@ _import_structure["models.gpt_neox_japanese"].extend( [ "GPTNeoXJapaneseForCausalLM", - "GPTNeoXJapaneseLayer", "GPTNeoXJapaneseModel", "GPTNeoXJapanesePreTrainedModel", ] @@ -2551,7 +2543,6 @@ "LongformerForTokenClassification", "LongformerModel", "LongformerPreTrainedModel", - "LongformerSelfAttention", ] ) _import_structure["models.longt5"].extend( @@ -2584,7 +2575,6 @@ "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", - "LxmertXLayer", ] ) _import_structure["models.m2m_100"].extend( @@ -2608,7 +2598,9 @@ "Mamba2PreTrainedModel", ] ) - _import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"]) + _import_structure["models.marian"].extend( + ["MarianForCausalLM", "MarianModel", "MarianMTModel", "MarianPreTrainedModel"] + ) _import_structure["models.markuplm"].extend( [ "MarkupLMForQuestionAnswering", @@ -2691,7 +2683,6 @@ "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", - "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", @@ -2737,7 +2728,6 @@ "MPNetForQuestionAnswering", "MPNetForSequenceClassification", "MPNetForTokenClassification", - "MPNetLayer", "MPNetModel", "MPNetPreTrainedModel", ] @@ -2827,7 +2817,6 @@ "NystromformerForQuestionAnswering", "NystromformerForSequenceClassification", "NystromformerForTokenClassification", - 
"NystromformerLayer", "NystromformerModel", "NystromformerPreTrainedModel", ] @@ -2941,7 +2930,6 @@ "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "PerceiverForSequenceClassification", - "PerceiverLayer", "PerceiverModel", "PerceiverPreTrainedModel", ] @@ -3077,11 +3065,9 @@ ) _import_structure["models.reformer"].extend( [ - "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", - "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", @@ -3102,7 +3088,6 @@ "RemBertForQuestionAnswering", "RemBertForSequenceClassification", "RemBertForTokenClassification", - "RemBertLayer", "RemBertModel", "RemBertPreTrainedModel", "load_tf_weights_in_rembert", @@ -3149,7 +3134,6 @@ "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", - "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", @@ -3163,7 +3147,6 @@ "RoFormerForQuestionAnswering", "RoFormerForSequenceClassification", "RoFormerForTokenClassification", - "RoFormerLayer", "RoFormerModel", "RoFormerPreTrainedModel", "load_tf_weights_in_roformer", @@ -3220,7 +3203,6 @@ "SegformerDecodeHead", "SegformerForImageClassification", "SegformerForSemanticSegmentation", - "SegformerLayer", "SegformerModel", "SegformerPreTrainedModel", ] @@ -3279,7 +3261,6 @@ [ "SplinterForPreTraining", "SplinterForQuestionAnswering", - "SplinterLayer", "SplinterModel", "SplinterPreTrainedModel", ] @@ -3292,7 +3273,6 @@ "SqueezeBertForSequenceClassification", "SqueezeBertForTokenClassification", "SqueezeBertModel", - "SqueezeBertModule", "SqueezeBertPreTrainedModel", ] ) @@ -3491,7 +3471,6 @@ "ViltForMaskedLM", "ViltForQuestionAnswering", "ViltForTokenClassification", - "ViltLayer", "ViltModel", "ViltPreTrainedModel", ] @@ -3511,7 +3490,6 @@ "VisualBertForQuestionAnswering", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", - "VisualBertLayer", "VisualBertModel", "VisualBertPreTrainedModel", ] @@ -3527,7 +3505,6 @@ _import_structure["models.vit_mae"].extend( [ "ViTMAEForPreTraining", - "ViTMAELayer", "ViTMAEModel", "ViTMAEPreTrainedModel", ] @@ -3707,7 +3684,6 @@ "YosoForQuestionAnswering", "YosoForSequenceClassification", "YosoForTokenClassification", - "YosoLayer", "YosoModel", "YosoPreTrainedModel", ] @@ -3854,7 +3830,6 @@ ) _import_structure["models.bert"].extend( [ - "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", @@ -4151,7 +4126,6 @@ "TFLongformerForTokenClassification", "TFLongformerModel", "TFLongformerPreTrainedModel", - "TFLongformerSelfAttention", ] ) _import_structure["models.lxmert"].extend( @@ -5827,7 +5801,8 @@ from .models.llama import LlamaTokenizer from .models.m2m_100 import M2M100Tokenizer from .models.marian import MarianTokenizer - from .models.mbart import MBart50Tokenizer, MBartTokenizer + from .models.mbart import MBartTokenizer + from .models.mbart50 import MBart50Tokenizer from .models.mluke import MLukeTokenizer from .models.mt5 import MT5Tokenizer from .models.nllb import NllbTokenizer @@ -6298,7 +6273,6 @@ BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, - BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, @@ -6318,7 +6292,6 @@ BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, - BigBirdLayer, BigBirdModel, BigBirdPreTrainedModel, load_tf_weights_in_big_bird, @@ -6413,7 +6386,6 @@ 
CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, - CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, @@ -6486,7 +6458,6 @@ ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, - ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, @@ -6671,7 +6642,6 @@ QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, - QDQBertLayer, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, @@ -6870,7 +6840,6 @@ FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, - FNetLayer, FNetModel, FNetPreTrainedModel, ) @@ -6958,13 +6927,11 @@ GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, - GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) from .models.gpt_neox_japanese import ( GPTNeoXJapaneseForCausalLM, - GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) @@ -7140,7 +7107,6 @@ LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, - LongformerSelfAttention, ) from .models.longt5 import ( LongT5EncoderModel, @@ -7167,7 +7133,6 @@ LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, - LxmertXLayer, ) from .models.m2m_100 import ( M2M100ForConditionalGeneration, @@ -7184,7 +7149,7 @@ Mamba2Model, Mamba2PreTrainedModel, ) - from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel + from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel, MarianPreTrainedModel from .models.markuplm import ( MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, @@ -7250,7 +7215,6 @@ MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, - MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, @@ -7286,7 +7250,6 @@ MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, - MPNetLayer, MPNetModel, MPNetPreTrainedModel, ) @@ -7358,7 +7321,6 @@ NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, - NystromformerLayer, NystromformerModel, NystromformerPreTrainedModel, ) @@ -7446,7 +7408,6 @@ PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, - PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) @@ -7548,11 +7509,9 @@ RecurrentGemmaPreTrainedModel, ) from .models.reformer import ( - ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, - ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, @@ -7569,7 +7528,6 @@ RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, - RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, @@ -7608,7 +7566,6 @@ RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, - RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, @@ -7620,7 +7577,6 @@ RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, - RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, @@ -7665,7 +7621,6 @@ SegformerDecodeHead, SegformerForImageClassification, SegformerForSemanticSegmentation, - SegformerLayer, SegformerModel, SegformerPreTrainedModel, ) @@ -7710,7 +7665,6 @@ 
from .models.splinter import ( SplinterForPreTraining, SplinterForQuestionAnswering, - SplinterLayer, SplinterModel, SplinterPreTrainedModel, ) @@ -7721,7 +7675,6 @@ SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, - SqueezeBertModule, SqueezeBertPreTrainedModel, ) from .models.stablelm import ( @@ -7870,7 +7823,6 @@ ViltForMaskedLM, ViltForQuestionAnswering, ViltForTokenClassification, - ViltLayer, ViltModel, ViltPreTrainedModel, ) @@ -7886,7 +7838,6 @@ VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, - VisualBertLayer, VisualBertModel, VisualBertPreTrainedModel, ) @@ -7898,7 +7849,6 @@ ) from .models.vit_mae import ( ViTMAEForPreTraining, - ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) @@ -8040,7 +7990,6 @@ YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, - YosoLayer, YosoModel, YosoPreTrainedModel, ) @@ -8174,7 +8123,6 @@ TFBartPretrainedModel, ) from .models.bert import ( - TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, @@ -8228,7 +8176,6 @@ TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, - TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) @@ -8413,7 +8360,6 @@ TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, - TFLongformerSelfAttention, ) from .models.lxmert import ( TFLxmertForPreTraining, @@ -8503,7 +8449,6 @@ TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, - TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) @@ -8541,7 +8486,6 @@ TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, - TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) diff --git a/src/transformers/models/albert/__init__.py b/src/transformers/models/albert/__init__.py index 1d0a4a4d0284..57b5747909e0 100644 --- a/src/transformers/models/albert/__init__.py +++ b/src/transformers/models/albert/__init__.py @@ -11,165 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_albert": ["AlbertConfig", "AlbertOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_albert"] = ["AlbertTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_albert"] = [ - "AlbertForMaskedLM", - "AlbertForMultipleChoice", - "AlbertForPreTraining", - "AlbertForQuestionAnswering", - "AlbertForSequenceClassification", - "AlbertForTokenClassification", - "AlbertModel", - "AlbertPreTrainedModel", - "load_tf_weights_in_albert", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_albert"] = [ - "TFAlbertForMaskedLM", - "TFAlbertForMultipleChoice", - "TFAlbertForPreTraining", - "TFAlbertForQuestionAnswering", - "TFAlbertForSequenceClassification", - "TFAlbertForTokenClassification", - "TFAlbertMainLayer", - "TFAlbertModel", - "TFAlbertPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_albert"] = [ - "FlaxAlbertForMaskedLM", - "FlaxAlbertForMultipleChoice", - "FlaxAlbertForPreTraining", - "FlaxAlbertForQuestionAnswering", - "FlaxAlbertForSequenceClassification", - "FlaxAlbertForTokenClassification", - "FlaxAlbertModel", - "FlaxAlbertPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_albert import AlbertConfig, AlbertOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_albert import AlbertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_albert_fast import AlbertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_albert import ( - AlbertForMaskedLM, - AlbertForMultipleChoice, - AlbertForPreTraining, - AlbertForQuestionAnswering, - AlbertForSequenceClassification, - AlbertForTokenClassification, - AlbertModel, - AlbertPreTrainedModel, - load_tf_weights_in_albert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_albert import ( - TFAlbertForMaskedLM, - TFAlbertForMultipleChoice, - TFAlbertForPreTraining, - TFAlbertForQuestionAnswering, - TFAlbertForSequenceClassification, - TFAlbertForTokenClassification, - TFAlbertMainLayer, - TFAlbertModel, - TFAlbertPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() 
- except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_albert import ( - FlaxAlbertForMaskedLM, - FlaxAlbertForMultipleChoice, - FlaxAlbertForPreTraining, - FlaxAlbertForQuestionAnswering, - FlaxAlbertForSequenceClassification, - FlaxAlbertForTokenClassification, - FlaxAlbertModel, - FlaxAlbertPreTrainedModel, - ) + from .configuration_albert import * + from .modeling_albert import * + from .modeling_flax_albert import * + from .modeling_tf_albert import * + from .tokenization_albert import * + from .tokenization_albert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/albert/configuration_albert.py b/src/transformers/models/albert/configuration_albert.py index bae88486e102..33f4d6212a97 100644 --- a/src/transformers/models/albert/configuration_albert.py +++ b/src/transformers/models/albert/configuration_albert.py @@ -20,8 +20,10 @@ from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig +from ...utils.import_utils import register +@register() class AlbertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used @@ -151,6 +153,7 @@ def __init__( # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert +@register() class AlbertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: @@ -165,3 +168,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["AlbertConfig", "AlbertOnnxConfig"] diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 6ccb266009e1..d9e1172d712b 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -49,6 +49,7 @@ logging, replace_return_docstrings, ) +from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -58,6 +59,7 @@ _CONFIG_FOR_DOC = "AlbertConfig" +@register() def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: @@ -553,6 +555,7 @@ def forward( ) +@register(backends=("torch",)) class AlbertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -685,6 +688,7 @@ class AlbertForPreTrainingOutput(ModelOutput): "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig base_model_prefix = "albert" @@ -831,6 +835,7 @@ def forward( """, ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForPreTraining(AlbertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"] @@ -983,6 +988,7 @@ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor: "Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForMaskedLM(AlbertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"] @@ -1098,6 
+1104,7 @@ def forward( """, ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1199,6 +1206,7 @@ def forward( """, ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1282,6 +1290,7 @@ def forward( """, ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1385,6 +1394,7 @@ def forward( """, ALBERT_START_DOCSTRING, ) +@register(backends=("torch",)) class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1466,3 +1476,16 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_albert", + "AlbertPreTrainedModel", + "AlbertModel", + "AlbertForPreTraining", + "AlbertForMaskedLM", + "AlbertForSequenceClassification", + "AlbertForTokenClassification", + "AlbertForQuestionAnswering", + "AlbertForMultipleChoice", +] diff --git a/src/transformers/models/albert/modeling_flax_albert.py b/src/transformers/models/albert/modeling_flax_albert.py index b2c01ded3619..1bc5f0f4ff07 100644 --- a/src/transformers/models/albert/modeling_flax_albert.py +++ b/src/transformers/models/albert/modeling_flax_albert.py @@ -42,6 +42,7 @@ overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -505,6 +506,7 @@ def __call__(self, pooled_output, deterministic=True): return logits +@register(backends=("flax",)) class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -674,6 +676,7 @@ def __call__( "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) +@register(backends=("flax",)) class FlaxAlbertModel(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertModule @@ -742,6 +745,7 @@ def __call__( """, ALBERT_START_DOCSTRING, ) +@register(backends=("flax",)) class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForPreTrainingModule @@ -825,6 +829,7 @@ def __call__( @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) +@register(backends=("flax",)) class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMaskedLMModule @@ -895,6 +900,7 @@ def __call__( """, ALBERT_START_DOCSTRING, ) +@register(backends=("flax",)) class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForSequenceClassificationModule @@ -968,6 +974,7 @@ def __call__( """, ALBERT_START_DOCSTRING, ) +@register(backends=("flax",)) class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMultipleChoiceModule @@ -1041,6 +1048,7 @@ def __call__( """, ALBERT_START_DOCSTRING, ) +@register(backends=("flax",)) class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForTokenClassificationModule @@ -1109,6 +1117,7 @@ def __call__( """, ALBERT_START_DOCSTRING, ) 
+@register(backends=("flax",)) class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForQuestionAnsweringModule @@ -1119,3 +1128,14 @@ class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel): FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxAlbertPreTrainedModel", + "FlaxAlbertModel", + "FlaxAlbertForPreTraining", + "FlaxAlbertForMaskedLM", + "FlaxAlbertForSequenceClassification", + "FlaxAlbertForMultipleChoice", + "FlaxAlbertForTokenClassification", + "FlaxAlbertForQuestionAnswering", +] diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index 3a50eeb20ea7..c443421b9b90 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -56,6 +56,7 @@ logging, replace_return_docstrings, ) +from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -510,6 +511,7 @@ def build(self, input_shape=None): layer.build(None) +@register(backends=("tf",)) class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -585,6 +587,7 @@ def call(self, hidden_states: tf.Tensor) -> tf.Tensor: @keras_serializable +@register(backends=("tf",)) class TFAlbertMainLayer(keras.layers.Layer): config_class = AlbertConfig @@ -858,6 +861,7 @@ class TFAlbertForPreTrainingOutput(ModelOutput): "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) @@ -915,6 +919,7 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"] @@ -1046,6 +1051,7 @@ def build(self, input_shape=None): @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) +@register(backends=("tf",)) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"] @@ -1159,6 +1165,7 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions"] @@ -1253,6 +1260,7 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] @@ -1348,6 +1356,7 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] @@ -1455,6 +1464,7 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) +@register(backends=("tf",)) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] @@ -1558,3 +1568,16 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFAlbertPreTrainedModel", + "TFAlbertModel", + "TFAlbertForPreTraining", + "TFAlbertForMaskedLM", + "TFAlbertForSequenceClassification", + "TFAlbertForTokenClassification", + "TFAlbertForQuestionAnswering", + "TFAlbertForMultipleChoice", + "TFAlbertMainLayer", +] diff --git a/src/transformers/models/albert/tokenization_albert.py b/src/transformers/models/albert/tokenization_albert.py index 4068c7aad876..f8d1a38eaee8 100644 --- a/src/transformers/models/albert/tokenization_albert.py +++ b/src/transformers/models/albert/tokenization_albert.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import register logger = logging.get_logger(__name__) @@ -32,6 +33,7 @@ SPIECE_UNDERLINE = "▁" +@register(backends=("sentencepiece",)) class AlbertTokenizer(PreTrainedTokenizer): """ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -343,3 +345,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["AlbertTokenizer"] diff --git a/src/transformers/models/albert/tokenization_albert_fast.py b/src/transformers/models/albert/tokenization_albert_fast.py index eadfdcecfc5c..264fe4ebdf16 100644 --- a/src/transformers/models/albert/tokenization_albert_fast.py +++ b/src/transformers/models/albert/tokenization_albert_fast.py @@ -21,6 +21,7 @@ from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging +from ...utils.import_utils import register if is_sentencepiece_available(): @@ -35,6 +36,7 @@ SPIECE_UNDERLINE = "▁" +@register(backends=("tokenizers",)) class AlbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). 
Based on @@ -207,3 +209,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["AlbertTokenizerFast"] diff --git a/src/transformers/models/align/__init__.py b/src/transformers/models/align/__init__.py index 650b25c3e5d1..aaa64dfb6064 100644 --- a/src/transformers/models/align/__init__.py +++ b/src/transformers/models/align/__init__.py @@ -13,57 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_align": [ - "AlignConfig", - "AlignTextConfig", - "AlignVisionConfig", - ], - "processing_align": ["AlignProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_align"] = [ - "AlignModel", - "AlignPreTrainedModel", - "AlignTextModel", - "AlignVisionModel", - ] - if TYPE_CHECKING: - from .configuration_align import ( - AlignConfig, - AlignTextConfig, - AlignVisionConfig, - ) - from .processing_align import AlignProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_align import ( - AlignModel, - AlignPreTrainedModel, - AlignTextModel, - AlignVisionModel, - ) - + from .configuration_align import * + from .modeling_align import * + from .processing_align import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/align/configuration_align.py b/src/transformers/models/align/configuration_align.py index efec77b4b312..55bf04aeb84c 100644 --- a/src/transformers/models/align/configuration_align.py +++ b/src/transformers/models/align/configuration_align.py @@ -17,6 +17,8 @@ import os from typing import TYPE_CHECKING, List, Union +from ...utils.import_utils import register + if TYPE_CHECKING: pass @@ -28,6 +30,7 @@ logger = logging.get_logger(__name__) +@register() class AlignTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a @@ -152,6 +155,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) +@register() class AlignVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a @@ -291,6 +295,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) +@register() class AlignConfig(PretrainedConfig): r""" [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. 
It is used to @@ -378,3 +383,6 @@ def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: A """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["AlignTextConfig", "AlignVisionConfig", "AlignConfig"] diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index 1b744d0f208d..fee92dbfe7fe 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -38,6 +38,7 @@ logging, replace_return_docstrings, ) +from ...utils.import_utils import register from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig @@ -1165,6 +1166,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return pooled_output +@register(backends=("torch",)) class AlignPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1198,6 +1200,7 @@ def _init_weights(self, module): """The text model from ALIGN without any head or projection on top.""", ALIGN_START_DOCSTRING, ) +@register(backends=("torch",)) class AlignTextModel(AlignPreTrainedModel): config_class = AlignTextConfig _no_split_modules = ["AlignTextEmbeddings"] @@ -1325,6 +1328,7 @@ def forward( """The vision model from ALIGN without any head or projection on top.""", ALIGN_START_DOCSTRING, ) +@register(backends=("torch",)) class AlignVisionModel(AlignPreTrainedModel): config_class = AlignVisionConfig main_input_name = "pixel_values" @@ -1411,6 +1415,7 @@ def forward( @add_start_docstrings(ALIGN_START_DOCSTRING) +@register(backends=("torch",)) class AlignModel(AlignPreTrainedModel): config_class = AlignConfig @@ -1636,3 +1641,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["AlignPreTrainedModel", "AlignTextModel", "AlignVisionModel", "AlignModel"] diff --git a/src/transformers/models/align/processing_align.py b/src/transformers/models/align/processing_align.py index 5fdaf0514048..546805b30ce1 100644 --- a/src/transformers/models/align/processing_align.py +++ b/src/transformers/models/align/processing_align.py @@ -16,7 +16,7 @@ Image/Text processor class for ALIGN """ -from typing import List, Union +from ...utils.import_utils import register try: @@ -42,6 +42,7 @@ class AlignProcessorKwargs(ProcessingKwargs, total=False): } +@register() class AlignProcessor(ProcessorMixin): r""" Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and @@ -162,3 +163,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["AlignProcessor"] diff --git a/src/transformers/models/altclip/__init__.py b/src/transformers/models/altclip/__init__.py index 4e3cb99bbb16..a30de8a25275 100755 --- a/src/transformers/models/altclip/__init__.py +++ b/src/transformers/models/altclip/__init__.py @@ -13,55 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_altclip": [ - "AltCLIPConfig", - "AltCLIPTextConfig", - "AltCLIPVisionConfig", - ], - "processing_altclip": ["AltCLIPProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_altclip"] = [ - "AltCLIPPreTrainedModel", - "AltCLIPModel", - "AltCLIPTextModel", - "AltCLIPVisionModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_altclip import ( - AltCLIPConfig, - AltCLIPTextConfig, - AltCLIPVisionConfig, - ) - from .processing_altclip import AltCLIPProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_altclip import ( - AltCLIPModel, - AltCLIPPreTrainedModel, - AltCLIPTextModel, - AltCLIPVisionModel, - ) - - + from .configuration_altclip import * + from .modeling_altclip import * + from .processing_altclip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/altclip/configuration_altclip.py b/src/transformers/models/altclip/configuration_altclip.py index 1cefeccd347a..836b5707cf19 100755 --- a/src/transformers/models/altclip/configuration_altclip.py +++ b/src/transformers/models/altclip/configuration_altclip.py @@ -19,11 +19,13 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging +from ...utils.import_utils import register logger = logging.get_logger(__name__) +@register() class AltCLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a @@ -142,6 +144,7 @@ def __init__( self.project_dim = project_dim +@register() class AltCLIPVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an @@ -252,6 +255,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) +@register() class AltCLIPConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPModel`]. 
It is used to instantiate an @@ -398,3 +402,6 @@ def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["AltCLIPTextConfig", "AltCLIPVisionConfig", "AltCLIPConfig"] diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 0d344cc54b13..059e1372c115 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -33,6 +33,7 @@ from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from ...utils.import_utils import register from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig @@ -1021,6 +1022,7 @@ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: return embeddings +@register(backends=("torch",)) class AltCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1136,6 +1138,7 @@ def forward( ) +@register(backends=("torch",)) class AltCLIPVisionModel(AltCLIPPreTrainedModel): config_class = AltCLIPVisionConfig main_input_name = "pixel_values" @@ -1367,6 +1370,7 @@ def forward( ) +@register(backends=("torch",)) class AltCLIPTextModel(AltCLIPPreTrainedModel): config_class = AltCLIPTextConfig @@ -1459,6 +1463,7 @@ def forward( ) +@register(backends=("torch",)) class AltCLIPModel(AltCLIPPreTrainedModel): config_class = AltCLIPConfig @@ -1694,3 +1699,6 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = ["AltCLIPPreTrainedModel", "AltCLIPVisionModel", "AltCLIPTextModel", "AltCLIPModel"] diff --git a/src/transformers/models/altclip/processing_altclip.py b/src/transformers/models/altclip/processing_altclip.py index 2814b2d7f26e..e787217dc77c 100644 --- a/src/transformers/models/altclip/processing_altclip.py +++ b/src/transformers/models/altclip/processing_altclip.py @@ -20,8 +20,10 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding +from ...utils.import_utils import register +@register() class AltCLIPProcessor(ProcessorMixin): r""" Constructs a AltCLIP processor which wraps a CLIP image processor and a XLM-Roberta tokenizer into a single @@ -130,3 +132,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["AltCLIPProcessor"] diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 8ae133d0ffe0..66e270ba5a99 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1375,6 +1375,11 @@ def is_liger_kernel_available(): Please note that you may need to restart your runtime after installation. """ +# docstyle-ignore +TORCHAUDIO_IMPORT_ERROR = """ +{0} requires the torchaudio library but it was not found in your environment. 
Please install it and restart your +runtime. +""" # docstyle-ignore PANDAS_IMPORT_ERROR = """ @@ -1550,6 +1555,7 @@ def is_liger_kernel_available(): ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), + ("torchaudio", (is_torchaudio_available, TORCHAUDIO_IMPORT_ERROR)), ("natten", (is_natten_available, NATTEN_IMPORT_ERROR)), ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), @@ -1617,19 +1623,73 @@ class _LazyModule(ModuleType): # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) - self._modules = set(import_structure.keys()) - self._class_to_module = {} - for key, values in import_structure.items(): - for value in values: - self._class_to_module[value] = key - # Needed for autocompletion in an IDE - self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) - self.__file__ = module_file - self.__spec__ = module_spec - self.__path__ = [os.path.dirname(module_file)] - self._objects = {} if extra_objects is None else extra_objects - self._name = name - self._import_structure = import_structure + + if any(isinstance(key, frozenset) for key in import_structure.keys()): + PER_BACKEND_SPLIT = True + else: + PER_BACKEND_SPLIT = False + + self._object_missing_backend = {} + + if PER_BACKEND_SPLIT: + self._modules = set() + self._class_to_module = {} + self.__all__ = [] + + _import_structure = {} + + for backends, item in import_structure.items(): + lacking_backends = [] + for backend in backends: + if backend not in BACKENDS_MAPPING: + raise ValueError( + f"Error: the following backend: '{backend}' was specified around object {item} but isn't specified in the backends mapping." + ) + callable, error = BACKENDS_MAPPING[backend] + if not callable(): + lacking_backends.append(backend) + + self._modules.union(set(item.keys())) + for key, values in item.items(): + for value in values: + self._class_to_module[value] = key + + if key not in _import_structure: + _import_structure[key] = values + else: + _import_structure[key].extend(values) + + # Needed for autocompletion in an IDE + self.__all__.extend(list(item.keys()) + list(chain(*item.values()))) + + if len(lacking_backends): + for module, objects in item.items(): + for obj in objects: + self._object_missing_backend[obj] = lacking_backends + self._object_missing_backend[module] = lacking_backends + + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = _import_structure + + # This can be removed once every exportable object has a `register()` export. 
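+        # Legacy path: a flat {module: [objects]} mapping, as built by the
+        # hand-written _import_structure dicts, keeps its previous handling.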
+ if not PER_BACKEND_SPLIT: + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure # Needed for autocompletion in an IDE def __dir__(self): @@ -1646,6 +1706,20 @@ def __getattr__(self, name: str) -> Any: return self._objects[name] if name in self._modules: value = self._get_module(name) + elif name in self._object_missing_backend.keys(): + missing_backends = self._object_missing_backend[name] + + class Placeholder(metaclass=DummyObject): + _backends = missing_backends + + def __init__(self, *args, **kwargs): + requires_backends(self, missing_backends) + + # Placeholder.__class__.__name__ = name + Placeholder.__name__ = name + Placeholder.__module__ = self.__spec__ + + value = Placeholder elif name in self._class_to_module.keys(): module = self._get_module(self._class_to_module[name]) value = getattr(module, name) @@ -1689,3 +1763,173 @@ def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: spec.loader.exec_module(module) module = sys.modules[name] return module + + +def register(*, backends=()): + """ + This method enables two things: + - Attaching a `__backends` tuple to an object to see what are the necessary backends for it + to execute correctly without instantiating it + - The '@register' string is used to dynamically import objects + """ + + if not isinstance(backends, tuple): + raise ValueError("Backends should be a tuple.") + + def inner_fn(fun): + fun.__backends = backends + return fun + + return inner_fn + + +@lru_cache() +def define_import_structure(module_path): + import_structure = {} + if os.path.isdir(module_path): + for f in os.listdir(module_path): + if f != "__pycache__" and os.path.isdir(os.path.join(module_path, f)): + import_structure[f] = define_import_structure(os.path.join(module_path, f)) + directory = module_path + else: + directory = os.path.dirname(module_path) + + adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))] + + if "__init__.py" in adjacent_modules: + adjacent_modules.remove("__init__.py") + + module_requirements = {} + for module_name in adjacent_modules: + with open(os.path.join(directory, module_name)) as f: + file_content = f.read() + + # Remove the .py suffix + if module_name.endswith(".py"): + module_name = module_name[:-3] + + previous_line = "" + previous_index = 0 + + lines = file_content.split("\n") + for index, line in enumerate(lines): + # This allows registering items with other decorators. We'll take a look + # at the line that follows at the same indentation level. + if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@register"): + continue + + # Skipping line enables putting whatever we want between the + # register() call and the actuall class/method definition. + # This is what enables having # Copied from statements, docs, etc. 
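+            # e.g. a "# Copied from ..." comment between the decorator and the
+            # class/def line does not prevent the object from being collected:
+            #
+            #     @register(backends=("torch",))
+            #     # Copied from transformers.models.bert.modeling_bert.BertModel
+            #     class MyModel: ...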
+ skip_line = False + + if "@register" in previous_line: + skip_line = False + + # Backends are defined on the same line as register + if "backends" in previous_line: + backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0] + backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ")])) + + # Backends are defined in the lines following register, for example such as: + # @register( + # backends=( + # "sentencepiece", + # "torch", + # "tf", + # ) + # ) + # + # or + # + # @register( + # backends=( + # "sentencepiece", "tf" + # ) + # ) + elif "backends" in lines[previous_index + 1]: + backends = [] + for backend_line in lines[previous_index:index]: + if "backends" in backend_line: + backend_line = backend_line.split("=")[1] + if '"' in backend_line or "'" in backend_line: + if ", " in backend_line: + backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", ")) + else: + backends.append(backend_line.strip("()\"', ")) + + # If the line is only a ')', then we reached the end of the backends and we break. + if backend_line.strip() == ")": + break + backends = tuple(backends) + + # No backends are registered + else: + backends = () + + backends = frozenset(backends) + if backends not in module_requirements: + module_requirements[backends] = {} + if module_name not in module_requirements[backends]: + module_requirements[backends][module_name] = [] + + if not line.startswith("class") and not line.startswith("def"): + skip_line = True + else: + start_index = 6 if line.startswith("class") else 4 + object_name = line[start_index:].split("(")[0].strip(":") + module_requirements[backends][module_name].append(object_name) + + if not skip_line: + previous_line = line + previous_index = index + + import_structure = {**module_requirements, **import_structure} + return import_structure + + +def spread_import_structure(nested_import_structure): + def propagate_tuple(unordered_import_structure): + tuple_first_import_structure = {} + for _key, _value in unordered_import_structure.items(): + if not isinstance(_value, dict): + tuple_first_import_structure[_key] = _value + + elif any(isinstance(v, tuple) for v in _value.keys()): + # Here we want to switch around key and v + for k, v in _value.items(): + if isinstance(k, tuple): + if k not in tuple_first_import_structure: + tuple_first_import_structure[k] = {} + tuple_first_import_structure[k][_key] = v + + else: + tuple_first_import_structure[_key] = propagate_tuple(_value) + + return tuple_first_import_structure + + def flatten_dict(_dict, previous_key=None): + items = [] + for _key, _value in _dict.items(): + _key = f"{previous_key}.{_key}" if previous_key is not None else _key + if isinstance(_value, dict): + items.extend(flatten_dict(_value, _key).items()) + else: + items.append((_key, _value)) + return dict(items) + + # The tuples contain the necessary backends. We want these first, so we propagate them up the + # import structure. + ordered_import_structure = nested_import_structure + for i in range(6): + ordered_import_structure = propagate_tuple(ordered_import_structure) + + # We then flatten the dict so that it references a module path. 
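+    # e.g. {"albert": {frozenset({"torch"}): {"modeling_albert": [...]}}} should
+    # become {frozenset({"torch"}): {"albert.modeling_albert": [...]}}.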
+ flattened_import_structure = {} + for key, value in ordered_import_structure.copy().items(): + if isinstance(key, str): + del ordered_import_structure[key] + else: + flattened_import_structure[key] = flatten_dict(value) + + return flattened_import_structure diff --git a/tests/utils/import_structures/import_structure_raw_register.py b/tests/utils/import_structures/import_structure_raw_register.py new file mode 100644 index 000000000000..0a74438a0173 --- /dev/null +++ b/tests/utils/import_structures/import_structure_raw_register.py @@ -0,0 +1,61 @@ +# fmt: off + +from transformers.utils.import_utils import register + + +@register() +class A0: + def __init__(self): + pass + + +@register() +def a0(): + pass + + +@register(backends=("torch", "tf")) +class A1: + def __init__(self): + pass + + +@register(backends=("torch", "tf")) +def a1(): + pass + + +@register( + backends=("torch", "tf") +) +class A2: + def __init__(self): + pass + + +@register( + backends=("torch", "tf") +) +def a2(): + pass + + +@register( + backends=( + "torch", + "tf" + ) +) +class A3: + def __init__(self): + pass + + +@register( + backends=( + "torch", + "tf" + ) +) +def a3(): + pass diff --git a/tests/utils/import_structures/import_structure_register_with_comments.py b/tests/utils/import_structures/import_structure_register_with_comments.py new file mode 100644 index 000000000000..21828d19d795 --- /dev/null +++ b/tests/utils/import_structures/import_structure_register_with_comments.py @@ -0,0 +1,69 @@ +# fmt: off + +from transformers.utils.import_utils import register + + +@register() +# That's a statement +class B0: + def __init__(self): + pass + + +@register() +# That's a statement +def b0(): + pass + + +@register(backends=("torch", "tf")) +# That's a statement +class B1: + def __init__(self): + pass + + +@register(backends=("torch", "tf")) +# That's a statement +def b1(): + pass + + +@register( + backends=("torch", "tf") +) +# That's a statement +class B2: + def __init__(self): + pass + + +@register( + backends=("torch", "tf") +) +# That's a statement +def b2(): + pass + + +@register( + backends=( + "torch", + "tf" + ) +) +# That's a statement +class B3: + def __init__(self): + pass + + +@register( + backends=( + "torch", + "tf" + ) +) +# That's a statement +def b3(): + pass diff --git a/tests/utils/import_structures/import_structure_register_with_duplicates.py b/tests/utils/import_structures/import_structure_register_with_duplicates.py new file mode 100644 index 000000000000..13f79d051e1d --- /dev/null +++ b/tests/utils/import_structures/import_structure_register_with_duplicates.py @@ -0,0 +1,67 @@ +# fmt: off + +from transformers.utils.import_utils import register + + +@register(backends=("torch", "torch")) +class C0: + def __init__(self): + pass + + +@register(backends=("torch", "torch")) +def c0(): + pass + + +@register(backends=("torch", "torch")) +# That's a statement +class C1: + def __init__(self): + pass + + +@register(backends=("torch", "torch")) +# That's a statement +def c1(): + pass + + +@register( + backends=("torch", "torch") +) +# That's a statement +class C2: + def __init__(self): + pass + + +@register( + backends=("torch", "torch") +) +# That's a statement +def c2(): + pass + + +@register( + backends=( + "torch", + "torch" + ) +) +# That's a statement +class C3: + def __init__(self): + pass + + +@register( + backends=( + "torch", + "torch" + ) +) +# That's a statement +def c3(): + pass diff --git a/tests/utils/test_import_structure.py b/tests/utils/test_import_structure.py new file mode 100644 index 
000000000000..b1e100dde626 --- /dev/null +++ b/tests/utils/test_import_structure.py @@ -0,0 +1,94 @@ +import os +import unittest +from pathlib import Path + +from transformers.utils.import_utils import define_import_structure, spread_import_structure + + +import_structures = Path("import_structures") + + +def fetch__all__(file_content): + """ + Returns the content of the __all__ variable in the file content. + Returns None if not defined, otherwise returns a list of strings. + """ + lines = file_content.split("\n") + for line_index in range(len(lines)): + line = lines[line_index] + if line.startswith("__all__ = "): + # __all__ is defined on a single line + if line.endswith("]"): + return [obj.strip("\"' ") for obj in line.split("=")[1].strip(" []").split(",")] + + # __all__ is defined on multiple lines + else: + _all = [] + for __all__line_index in range(line_index + 1, len(lines)): + if lines[__all__line_index].strip() == "]": + return _all + else: + _all.append(lines[__all__line_index].strip("\"', ")) + + +class TestImportStructures(unittest.TestCase): + base_transformers_path = Path(__file__).parent.parent.parent + models_path = base_transformers_path / "src" / "transformers" / "models" + models_import_structure = spread_import_structure(define_import_structure(models_path)) + + def test_definition(self): + import_structure = define_import_structure(import_structures) + import_structure_definition = { + frozenset(()): { + "import_structure_raw_register": ["A0", "a0"], + "import_structure_register_with_comments": ["B0", "b0"], + }, + frozenset(("tf", "torch")): { + "import_structure_raw_register": ["A1", "a1", "A2", "a2", "A3", "a3"], + "import_structure_register_with_comments": ["B1", "b1", "B2", "b2", "B3", "b3"], + }, + frozenset(("torch",)): { + "import_structure_register_with_duplicates": ["C0", "c0", "C1", "c1", "C2", "c2", "C3", "c3"], + }, + } + + self.assertDictEqual(import_structure, import_structure_definition) + + def test_transformers_specific_model_import(self): + """ + This test ensures that there is equivalence between what is written down in __all__ and what is + written down with register(). + + It doesn't test the backends attributed to register(). 
+ """ + for architecture in os.listdir(self.models_path): + if ( + os.path.isfile(self.models_path / architecture) + or architecture.startswith("_") + or architecture == "deprecated" + ): + continue + + with self.subTest(f"Testing arch {architecture}"): + import_structure = define_import_structure(self.models_path / architecture) + backend_agnostic_import_structure = {} + for requirement, module_object_mapping in import_structure.items(): + for module, objects in module_object_mapping.items(): + if module not in backend_agnostic_import_structure: + backend_agnostic_import_structure[module] = [] + + backend_agnostic_import_structure[module].extend(objects) + + for module, objects in backend_agnostic_import_structure.items(): + with open(self.models_path / architecture / f"{module}.py") as f: + content = f.read() + _all = fetch__all__(content) + + if _all is None: + raise ValueError(f"{module} doesn't have __all__ defined.") + + error_message = ( + f"self.models_path / architecture / f'{module}.py doesn't seem to be defined correctly:\n" + f"Defined in __all__: {sorted(_all)}\nDefined with register: {sorted(objects)}" + ) + self.assertListEqual(sorted(objects), sorted(_all), msg=error_message) From af3b2251d1abf7e153c3f6f5493924039ba8a43c Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 25 Jul 2024 14:09:48 +0200 Subject: [PATCH 02/11] Register -> Export. Export all in __all__. Sensible defaults according to filename. --- .../models/albert/configuration_albert.py | 3 - .../models/albert/modeling_albert.py | 10 - .../models/albert/modeling_flax_albert.py | 9 - .../models/albert/modeling_tf_albert.py | 10 - .../models/albert/tokenization_albert.py | 4 +- .../models/albert/tokenization_albert_fast.py | 2 - .../models/align/configuration_align.py | 5 - .../models/align/modeling_align.py | 6 +- .../models/align/processing_align.py | 3 - .../models/altclip/configuration_altclip.py | 4 - .../models/altclip/modeling_altclip.py | 5 - .../models/altclip/processing_altclip.py | 2 - src/transformers/utils/import_utils.py | 227 ++++++++++++------ .../import_structure_raw_register.py | 23 +- ...import_structure_register_with_comments.py | 30 +-- ...port_structure_register_with_duplicates.py | 22 +- tests/utils/test_import_structure.py | 10 +- 17 files changed, 197 insertions(+), 178 deletions(-) diff --git a/src/transformers/models/albert/configuration_albert.py b/src/transformers/models/albert/configuration_albert.py index 33f4d6212a97..e1e2d4547cc4 100644 --- a/src/transformers/models/albert/configuration_albert.py +++ b/src/transformers/models/albert/configuration_albert.py @@ -20,10 +20,8 @@ from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig -from ...utils.import_utils import register -@register() class AlbertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. 
It is used @@ -153,7 +151,6 @@ def __init__( # Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Roberta->Albert -@register() class AlbertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index d9e1172d712b..dca1fe7f6002 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -49,7 +49,6 @@ logging, replace_return_docstrings, ) -from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -59,7 +58,6 @@ _CONFIG_FOR_DOC = "AlbertConfig" -@register() def load_tf_weights_in_albert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: @@ -555,7 +553,6 @@ def forward( ) -@register(backends=("torch",)) class AlbertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -688,7 +685,6 @@ class AlbertForPreTrainingOutput(ModelOutput): "The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig base_model_prefix = "albert" @@ -835,7 +831,6 @@ def forward( """, ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForPreTraining(AlbertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"] @@ -988,7 +983,6 @@ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor: "Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForMaskedLM(AlbertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"] @@ -1104,7 +1098,6 @@ def forward( """, ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1206,7 +1199,6 @@ def forward( """, ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1290,7 +1282,6 @@ def forward( """, ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) @@ -1394,7 +1385,6 @@ def forward( """, ALBERT_START_DOCSTRING, ) -@register(backends=("torch",)) class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) diff --git a/src/transformers/models/albert/modeling_flax_albert.py b/src/transformers/models/albert/modeling_flax_albert.py index 1bc5f0f4ff07..b5b49219aebf 100644 --- a/src/transformers/models/albert/modeling_flax_albert.py +++ b/src/transformers/models/albert/modeling_flax_albert.py @@ -42,7 +42,6 @@ overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging -from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -506,7 +505,6 @@ def __call__(self, pooled_output, deterministic=True): return logits -@register(backends=("flax",)) class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel): """ An 
abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -676,7 +674,6 @@ def __call__( "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertModel(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertModule @@ -745,7 +742,6 @@ def __call__( """, ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForPreTrainingModule @@ -829,7 +825,6 @@ def __call__( @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) -@register(backends=("flax",)) class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMaskedLMModule @@ -900,7 +895,6 @@ def __call__( """, ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForSequenceClassificationModule @@ -974,7 +968,6 @@ def __call__( """, ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMultipleChoiceModule @@ -1048,7 +1041,6 @@ def __call__( """, ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForTokenClassificationModule @@ -1117,7 +1109,6 @@ def __call__( """, ALBERT_START_DOCSTRING, ) -@register(backends=("flax",)) class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForQuestionAnsweringModule diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index c443421b9b90..24a25658a4d4 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -56,7 +56,6 @@ logging, replace_return_docstrings, ) -from ...utils.import_utils import register from .configuration_albert import AlbertConfig @@ -511,7 +510,6 @@ def build(self, input_shape=None): layer.build(None) -@register(backends=("tf",)) class TFAlbertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -587,7 +585,6 @@ def call(self, hidden_states: tf.Tensor) -> tf.Tensor: @keras_serializable -@register(backends=("tf",)) class TFAlbertMainLayer(keras.layers.Layer): config_class = AlbertConfig @@ -861,7 +858,6 @@ class TFAlbertForPreTrainingOutput(ModelOutput): "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) @@ -919,7 +915,6 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"] @@ -1051,7 +1046,6 @@ def build(self, input_shape=None): @add_start_docstrings("""Albert Model with a `language modeling` head on top.""", ALBERT_START_DOCSTRING) -@register(backends=("tf",)) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"] @@ -1165,7 +1159,6 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"predictions"] @@ -1260,7 +1253,6 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] @@ -1356,7 +1348,6 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] @@ -1464,7 +1455,6 @@ def build(self, input_shape=None): """, ALBERT_START_DOCSTRING, ) -@register(backends=("tf",)) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"] diff --git a/src/transformers/models/albert/tokenization_albert.py b/src/transformers/models/albert/tokenization_albert.py index f8d1a38eaee8..4971d0511f47 100644 --- a/src/transformers/models/albert/tokenization_albert.py +++ b/src/transformers/models/albert/tokenization_albert.py @@ -23,7 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging -from ...utils.import_utils import register +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -33,7 +33,7 @@ SPIECE_UNDERLINE = "▁" -@register(backends=("sentencepiece",)) +@export(backends=("sentencepiece",)) class AlbertTokenizer(PreTrainedTokenizer): """ Construct an ALBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). 
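The `register` to `export` rename shown in this hunk is mechanical, but the decorator's contract deserves a concrete illustration. The sketch below assumes the attribute is literally named `__backends`, as the decorator's docstring in `import_utils.py` puts it ("Attaching a `__backends` tuple to an object"); the real implementation lives in the `import_utils.py` hunks further down, and gains validation against `BACKENDS_MAPPING` in a later patch of this series.

# Illustrative sketch only, not the merged implementation.
def export(*, backends=()):
    # Backends must be declared as a tuple, e.g. backends=("torch", "tf").
    if not isinstance(backends, tuple):
        raise ValueError("Backends should be a tuple.")

    def inner_fn(fun):
        # Record the requirement on the object for runtime introspection; source
        # files are separately scanned for the literal "@export" marker when the
        # import structure is built.
        fun.__backends = backends
        return fun

    return inner_fn


# Matching the hunk above: the class stays importable in any environment, and a
# missing backend only raises once the object is actually used.
@export(backends=("sentencepiece",))
class AlbertTokenizer:
    ...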
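This is also where the "sensible defaults according to filename" from PATCH 02 come in: the file-level requirement derived from `BASE_FILE_REQUIREMENTS` (see the `import_utils.py` hunk below) is concatenated with the decorator's backends and frozen into the dictionary key. A small worked example, with a hypothetical module and class name:

# Hypothetical file "modeling_tf_example.py": the name matches the
# 'modeling_tf_' check, so the file contributes a ("tf",) base requirement.
base_requirements = ("tf",)

# Hypothetical decorator inside that file: @export(backends=("sentencepiece",))
decorator_backends = ("sentencepiece",)

# Mirrors `frozenset(backends + base_requirements)` from the parser below:
# duplicates collapse and ordering stops mattering once the tuple is frozen.
bucket = frozenset(decorator_backends + base_requirements)
assert bucket == frozenset({"sentencepiece", "tf"})

# The resulting entry groups the object under both requirements at once:
# {frozenset({"sentencepiece", "tf"}): {"modeling_tf_example": {"TFExampleModel"}}}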
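Finally, the consumer side. Once the import structure exists, a package `__init__.py` hands it to `_LazyModule`, which resolves objects on attribute access and substitutes dummy placeholders when a backend is missing. The sketch below follows the `_LazyModule` call already present in `speech_to_text/__init__.py` (touched by PATCH 08 further down) and assumes `define_import_structure` remains the public entry point after the `create_import_structure_from_path` split; treat it as a sketch of the intended wiring, not the final top-level `__init__.py`.

# Sketch of a model package __init__.py under the frozenset-keyed scheme.
import sys

from transformers.utils.import_utils import _LazyModule, define_import_structure

# Passing a file path returns the structure of its parent folder:
# {frozenset(backends): {"module_name": {"ObjectName", ...}}}
_import_structure = define_import_structure(__file__)

# Attribute access on the module now triggers the actual import; objects whose
# backends are missing come back as placeholders that raise a helpful error.
sys.modules[__name__] = _LazyModule(
    __name__, globals()["__file__"], _import_structure, module_spec=__spec__
)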
diff --git a/src/transformers/models/albert/tokenization_albert_fast.py b/src/transformers/models/albert/tokenization_albert_fast.py index 264fe4ebdf16..6e7b110b0afa 100644 --- a/src/transformers/models/albert/tokenization_albert_fast.py +++ b/src/transformers/models/albert/tokenization_albert_fast.py @@ -21,7 +21,6 @@ from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging -from ...utils.import_utils import register if is_sentencepiece_available(): @@ -36,7 +35,6 @@ SPIECE_UNDERLINE = "▁" -@register(backends=("tokenizers",)) class AlbertTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" ALBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on diff --git a/src/transformers/models/align/configuration_align.py b/src/transformers/models/align/configuration_align.py index 55bf04aeb84c..99fa81b4a935 100644 --- a/src/transformers/models/align/configuration_align.py +++ b/src/transformers/models/align/configuration_align.py @@ -17,8 +17,6 @@ import os from typing import TYPE_CHECKING, List, Union -from ...utils.import_utils import register - if TYPE_CHECKING: pass @@ -30,7 +28,6 @@ logger = logging.get_logger(__name__) -@register() class AlignTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a @@ -155,7 +152,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) -@register() class AlignVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AlignVisionModel`]. It is used to instantiate a @@ -295,7 +291,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) -@register() class AlignConfig(PretrainedConfig): r""" [`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. 
It is used to diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index fee92dbfe7fe..dcaa38be5750 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -20,6 +20,7 @@ import torch import torch.utils.checkpoint +from IPython.terminal.pt_inputhooks import backends from torch import nn from ...activations import ACT2FN @@ -38,7 +39,6 @@ logging, replace_return_docstrings, ) -from ...utils.import_utils import register from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig @@ -1166,7 +1166,6 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return pooled_output -@register(backends=("torch",)) class AlignPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1200,7 +1199,6 @@ def _init_weights(self, module): """The text model from ALIGN without any head or projection on top.""", ALIGN_START_DOCSTRING, ) -@register(backends=("torch",)) class AlignTextModel(AlignPreTrainedModel): config_class = AlignTextConfig _no_split_modules = ["AlignTextEmbeddings"] @@ -1328,7 +1326,6 @@ def forward( """The vision model from ALIGN without any head or projection on top.""", ALIGN_START_DOCSTRING, ) -@register(backends=("torch",)) class AlignVisionModel(AlignPreTrainedModel): config_class = AlignVisionConfig main_input_name = "pixel_values" @@ -1415,7 +1412,6 @@ def forward( @add_start_docstrings(ALIGN_START_DOCSTRING) -@register(backends=("torch",)) class AlignModel(AlignPreTrainedModel): config_class = AlignConfig diff --git a/src/transformers/models/align/processing_align.py b/src/transformers/models/align/processing_align.py index 546805b30ce1..923daee965fb 100644 --- a/src/transformers/models/align/processing_align.py +++ b/src/transformers/models/align/processing_align.py @@ -16,8 +16,6 @@ Image/Text processor class for ALIGN """ -from ...utils.import_utils import register - try: from typing import Unpack @@ -42,7 +40,6 @@ class AlignProcessorKwargs(ProcessingKwargs, total=False): } -@register() class AlignProcessor(ProcessorMixin): r""" Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and diff --git a/src/transformers/models/altclip/configuration_altclip.py b/src/transformers/models/altclip/configuration_altclip.py index 836b5707cf19..7333fa63a352 100755 --- a/src/transformers/models/altclip/configuration_altclip.py +++ b/src/transformers/models/altclip/configuration_altclip.py @@ -19,13 +19,11 @@ from ...configuration_utils import PretrainedConfig from ...utils import logging -from ...utils.import_utils import register logger = logging.get_logger(__name__) -@register() class AltCLIPTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a @@ -144,7 +142,6 @@ def __init__( self.project_dim = project_dim -@register() class AltCLIPVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an @@ -255,7 +252,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], return cls.from_dict(config_dict, **kwargs) -@register() class AltCLIPConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`AltCLIPModel`]. 
It is used to instantiate an diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 059e1372c115..4ed0930605e8 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -33,7 +33,6 @@ from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings -from ...utils.import_utils import register from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig @@ -1022,7 +1021,6 @@ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: return embeddings -@register(backends=("torch",)) class AltCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1138,7 +1136,6 @@ def forward( ) -@register(backends=("torch",)) class AltCLIPVisionModel(AltCLIPPreTrainedModel): config_class = AltCLIPVisionConfig main_input_name = "pixel_values" @@ -1370,7 +1367,6 @@ def forward( ) -@register(backends=("torch",)) class AltCLIPTextModel(AltCLIPPreTrainedModel): config_class = AltCLIPTextConfig @@ -1463,7 +1459,6 @@ def forward( ) -@register(backends=("torch",)) class AltCLIPModel(AltCLIPPreTrainedModel): config_class = AltCLIPConfig diff --git a/src/transformers/models/altclip/processing_altclip.py b/src/transformers/models/altclip/processing_altclip.py index e787217dc77c..534349884283 100644 --- a/src/transformers/models/altclip/processing_altclip.py +++ b/src/transformers/models/altclip/processing_altclip.py @@ -20,10 +20,8 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding -from ...utils.import_utils import register -@register() class AltCLIPProcessor(ProcessorMixin): r""" Constructs a AltCLIP processor which wraps a CLIP image processor and a XLM-Roberta tokenizer into a single diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 66e270ba5a99..3fe846f897db 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -27,8 +27,9 @@ from functools import lru_cache from itertools import chain from types import ModuleType -from typing import Any, Optional, Tuple, Union +from typing import Any, Optional, Tuple, Union, List +from nltk.downloader import update from packaging import version from . import logging @@ -1657,7 +1658,7 @@ def __init__(self, name, module_file, import_structure, module_spec=None, extra_ if key not in _import_structure: _import_structure[key] = values else: - _import_structure[key].extend(values) + _import_structure[key].update(values) # Needed for autocompletion in an IDE self.__all__.extend(list(item.keys()) + list(chain(*item.values()))) @@ -1675,7 +1676,7 @@ def __init__(self, name, module_file, import_structure, module_spec=None, extra_ self._name = name self._import_structure = _import_structure - # This can be removed once every exportable object has a `register()` export. + # This can be removed once every exportable object has a `export()` export. 
if not PER_BACKEND_SPLIT: self._modules = set(import_structure.keys()) self._class_to_module = {} @@ -1765,12 +1766,12 @@ def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: return module -def register(*, backends=()): +def export(*, backends=()): """ This method enables two things: - Attaching a `__backends` tuple to an object to see what are the necessary backends for it to execute correctly without instantiating it - - The '@register' string is used to dynamically import objects + - The '@export' string is used to dynamically import objects """ if not isinstance(backends, tuple): @@ -1783,6 +1784,51 @@ def inner_fn(fun): return inner_fn +BASE_FILE_REQUIREMENTS = { + lambda e: 'modeling_tf_' in e: ('tf',), + lambda e: 'modeling_flax_' in e: ('flax',), + lambda e: 'modeling_' in e: ('torch',), + lambda e: e.startswith('tokenization_') and e.endswith('_fast'): ('tokenizers',), +} + + +def fetch__all__(file_content): + """ + Returns the content of the __all__ variable in the file content. + Returns None if not defined, otherwise returns a list of strings. + """ + + if '__all__' not in file_content: + return [] + + lines = file_content.splitlines() + for index, line in enumerate(lines): + if line.startswith("__all__"): + start_index = index + + lines = lines[start_index:] + + if not lines[0].startswith('__all__'): + raise ValueError( + "fetch__all__ accepts a list of lines, with the first line being the __all__ variable declaration" + ) + + # __all__ is defined on a single line + if lines[0].endswith("]"): + return [obj.strip("\"' ") for obj in lines[0].split("=")[1].strip(" []").split(",")] + + # __all__ is defined on multiple lines + else: + _all = [] + for __all__line_index in range(1, len(lines)): + if lines[__all__line_index].strip() == "]": + return _all + else: + _all.append(lines[__all__line_index].strip("\"', ")) + + return _all + + @lru_cache() def define_import_structure(module_path): import_structure = {} @@ -1796,6 +1842,9 @@ def define_import_structure(module_path): adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))] + # We're only taking a look at files different from __init__.py + # We could theoretically export things directly from the __init__.py + # files, but this is not supported at this time. if "__init__.py" in adjacent_modules: adjacent_modules.remove("__init__.py") @@ -1811,78 +1860,108 @@ def define_import_structure(module_path): previous_line = "" previous_index = 0 - lines = file_content.split("\n") - for index, line in enumerate(lines): - # This allows registering items with other decorators. We'll take a look - # at the line that follows at the same indentation level. - if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@register"): - continue + # Some files have some requirements by default. + # For example, any file named `modeling_tf_xxx.py` + # should have TensorFlow as a required backend. + base_requirements = () + for string_check, requirements in BASE_FILE_REQUIREMENTS.items(): + if string_check(module_name): + base_requirements = requirements + break + + # Objects that have a `@export` assigned to them will get exported + # with the backends specified in the decorator as well as the file backends. + registered_objects = set() + if '@export' in file_content: + lines = file_content.split("\n") + for index, line in enumerate(lines): + + # This allows exporting items with other decorators. We'll take a look + # at the line that follows at the same indentation level. 
+ if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@export"): + continue + + # Skipping line enables putting whatever we want between the + # export() call and the actual class/method definition. + # This is what enables having # Copied from statements, docs, etc. + skip_line = False - # Skipping line enables putting whatever we want between the - # register() call and the actuall class/method definition. - # This is what enables having # Copied from statements, docs, etc. - skip_line = False + if "@export" in previous_line: + skip_line = False + + # Backends are defined on the same line as export + if "backends" in previous_line: + backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0] + backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ") if b])) + + # Backends are defined in the lines following export, for example such as: + # @export( + # backends=( + # "sentencepiece", + # "torch", + # "tf", + # ) + # ) + # + # or + # + # @export( + # backends=( + # "sentencepiece", "tf" + # ) + # ) + elif "backends" in lines[previous_index + 1]: + backends = [] + for backend_line in lines[previous_index:index]: + if "backends" in backend_line: + backend_line = backend_line.split("=")[1] + if '"' in backend_line or "'" in backend_line: + if ", " in backend_line: + backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", ")) + else: + backends.append(backend_line.strip("()\"', ")) + + # If the line is only a ')', then we reached the end of the backends and we break. + if backend_line.strip() == ")": + break + backends = tuple(backends) + + # No backends are registered for export + else: + backends = () - if "@register" in previous_line: - skip_line = False + backends = frozenset(backends + base_requirements) + if backends not in module_requirements: + module_requirements[backends] = {} + if module_name not in module_requirements[backends]: + module_requirements[backends][module_name] = set() - # Backends are defined on the same line as register - if "backends" in previous_line: - backends_string = previous_line.split("backends=")[1].split("(")[1].split(")")[0] - backends = tuple(sorted([b.strip("'\",") for b in backends_string.split(", ")])) - - # Backends are defined in the lines following register, for example such as: - # @register( - # backends=( - # "sentencepiece", - # "torch", - # "tf", - # ) - # ) - # - # or - # - # @register( - # backends=( - # "sentencepiece", "tf" - # ) - # ) - elif "backends" in lines[previous_index + 1]: - backends = [] - for backend_line in lines[previous_index:index]: - if "backends" in backend_line: - backend_line = backend_line.split("=")[1] - if '"' in backend_line or "'" in backend_line: - if ", " in backend_line: - backends.extend(backend.strip("()\"', ") for backend in backend_line.split(", ")) - else: - backends.append(backend_line.strip("()\"', ")) - - # If the line is only a ')', then we reached the end of the backends and we break. 
- if backend_line.strip() == ")": - break - backends = tuple(backends) - - # No backends are registered - else: - backends = () - - backends = frozenset(backends) - if backends not in module_requirements: - module_requirements[backends] = {} - if module_name not in module_requirements[backends]: - module_requirements[backends][module_name] = [] - - if not line.startswith("class") and not line.startswith("def"): - skip_line = True - else: - start_index = 6 if line.startswith("class") else 4 - object_name = line[start_index:].split("(")[0].strip(":") - module_requirements[backends][module_name].append(object_name) - - if not skip_line: - previous_line = line - previous_index = index + if not line.startswith("class") and not line.startswith("def"): + skip_line = True + else: + start_index = 6 if line.startswith("class") else 4 + object_name = line[start_index:].split("(")[0].strip(":") + module_requirements[backends][module_name].add(object_name) + registered_objects.add(object_name) + + if not skip_line: + previous_line = line + previous_index = index + + # All objects that are in __all__ should be exported by default. + # These objects are exported with the file backends. + if '__all__' in file_content: + _all = fetch__all__(file_content) + + backends = frozenset(base_requirements) + if backends not in module_requirements: + module_requirements[backends] = {} + if module_name not in module_requirements[backends]: + module_requirements[backends][module_name] = set() + + for _all_object in _all: + if _all_object not in registered_objects: + module_requirements[backends][module_name].add(_all_object) import_structure = {**module_requirements, **import_structure} return import_structure diff --git a/tests/utils/import_structures/import_structure_raw_register.py b/tests/utils/import_structures/import_structure_raw_register.py index 0a74438a0173..3f838ccdaa64 100644 --- a/tests/utils/import_structures/import_structure_raw_register.py +++ b/tests/utils/import_structures/import_structure_raw_register.py @@ -1,31 +1,31 @@ # fmt: off -from transformers.utils.import_utils import register +from transformers.utils.import_utils import export -@register() +@export() class A0: def __init__(self): pass -@register() +@export() def a0(): pass -@register(backends=("torch", "tf")) +@export(backends=("torch", "tf")) class A1: def __init__(self): pass -@register(backends=("torch", "tf")) +@export(backends=("torch", "tf")) def a1(): pass -@register( +@export( backends=("torch", "tf") ) class A2: @@ -33,14 +33,14 @@ def __init__(self): pass -@register( +@export( backends=("torch", "tf") ) def a2(): pass -@register( +@export( backends=( "torch", "tf" @@ -51,7 +51,7 @@ def __init__(self): pass -@register( +@export( backends=( "torch", "tf" @@ -59,3 +59,8 @@ def __init__(self): ) def a3(): pass + +@export(backends=()) +class A4: + def __init__(self): + pass diff --git a/tests/utils/import_structures/import_structure_register_with_comments.py b/tests/utils/import_structures/import_structure_register_with_comments.py index 21828d19d795..e716f0ebca05 100644 --- a/tests/utils/import_structures/import_structure_register_with_comments.py +++ b/tests/utils/import_structures/import_structure_register_with_comments.py @@ -1,55 +1,51 @@ # fmt: off -from transformers.utils.import_utils import register +from transformers.utils.import_utils import export -@register() +@export() # That's a statement class B0: def __init__(self): pass -@register() +@export() # That's a statement def b0(): pass -@register(backends=("torch", "tf")) 
+@export(backends=("torch", "tf")) # That's a statement class B1: def __init__(self): pass -@register(backends=("torch", "tf")) +@export(backends=("torch", "tf")) # That's a statement def b1(): pass -@register( - backends=("torch", "tf") -) +@export(backends=("torch", "tf")) # That's a statement class B2: def __init__(self): pass -@register( - backends=("torch", "tf") -) +@export(backends=("torch", "tf")) # That's a statement def b2(): pass -@register( +@export( backends=( - "torch", - "tf" + "torch", + "tf" ) ) # That's a statement @@ -58,10 +54,10 @@ def __init__(self): pass -@register( +@export( backends=( - "torch", - "tf" + "torch", + "tf" ) ) # That's a statement diff --git a/tests/utils/import_structures/import_structure_register_with_duplicates.py b/tests/utils/import_structures/import_structure_register_with_duplicates.py index 13f79d051e1d..509c8f64052b 100644 --- a/tests/utils/import_structures/import_structure_register_with_duplicates.py +++ b/tests/utils/import_structures/import_structure_register_with_duplicates.py @@ -1,50 +1,46 @@ # fmt: off -from transformers.utils.import_utils import register +from transformers.utils.import_utils import export -@register(backends=("torch", "torch")) +@export(backends=("torch", "torch")) class C0: def __init__(self): pass -@register(backends=("torch", "torch")) +@export(backends=("torch", "torch")) def c0(): pass -@register(backends=("torch", "torch")) +@export(backends=("torch", "torch")) # That's a statement class C1: def __init__(self): pass -@register(backends=("torch", "torch")) +@export(backends=("torch", "torch")) # That's a statement def c1(): pass -@register( - backends=("torch", "torch") -) +@export(backends=("torch", "torch")) # That's a statement class C2: def __init__(self): pass -@register( - backends=("torch", "torch") -) +@export(backends=("torch", "torch")) # That's a statement def c2(): pass -@register( +@export( backends=( "torch", "torch" @@ -56,7 +52,7 @@ def __init__(self): pass -@register( +@export( backends=( "torch", "torch" diff --git a/tests/utils/test_import_structure.py b/tests/utils/test_import_structure.py index b1e100dde626..ae28f31b2559 100644 --- a/tests/utils/test_import_structure.py +++ b/tests/utils/test_import_structure.py @@ -40,15 +40,15 @@ def test_definition(self): import_structure = define_import_structure(import_structures) import_structure_definition = { frozenset(()): { - "import_structure_raw_register": ["A0", "a0"], - "import_structure_register_with_comments": ["B0", "b0"], + "import_structure_raw_register": {"A0", "a0", "A4"}, + "import_structure_register_with_comments": {"B0", "b0"}, }, frozenset(("tf", "torch")): { - "import_structure_raw_register": ["A1", "a1", "A2", "a2", "A3", "a3"], - "import_structure_register_with_comments": ["B1", "b1", "B2", "b2", "B3", "b3"], + "import_structure_raw_register": {"A1", "a1", "A2", "a2", "A3", "a3"}, + "import_structure_register_with_comments": {"B1", "b1", "B2", "b2", "B3", "b3"}, }, frozenset(("torch",)): { - "import_structure_register_with_duplicates": ["C0", "c0", "C1", "c1", "C2", "c2", "C3", "c3"], + "import_structure_register_with_duplicates": {"C0", "c0", "C1", "c1", "C2", "c2", "C3", "c3"}, }, } From 5a293ea34d7289c25e55bb4adff7250dfba874a3 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 25 Jul 2024 15:46:43 +0200 Subject: [PATCH 03/11] Apply most comments from Amy and some comments from Lucain Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Lucain Pouget --- 
src/transformers/utils/import_utils.py | 114 ++++++++++++------ .../import_structure_raw_register.py | 14 +++ ...import_structure_register_with_comments.py | 14 +++ ...port_structure_register_with_duplicates.py | 14 +++ 4 files changed, 117 insertions(+), 39 deletions(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 3fe846f897db..e3e65822beff 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1625,22 +1625,16 @@ class _LazyModule(ModuleType): def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): super().__init__(name) - if any(isinstance(key, frozenset) for key in import_structure.keys()): - PER_BACKEND_SPLIT = True - else: - PER_BACKEND_SPLIT = False - self._object_missing_backend = {} - - if PER_BACKEND_SPLIT: + if any(isinstance(key, frozenset) for key in import_structure.keys()): self._modules = set() self._class_to_module = {} self.__all__ = [] _import_structure = {} - for backends, item in import_structure.items(): - lacking_backends = [] + for backends, module in import_structure.items(): + missing_backends = [] for backend in backends: if backend not in BACKENDS_MAPPING: raise ValueError( @@ -1648,26 +1642,22 @@ def __init__(self, name, module_file, import_structure, module_spec=None, extra_ ) callable, error = BACKENDS_MAPPING[backend] if not callable(): - lacking_backends.append(backend) + missing_backends.append(backend) + self._modules.union(set(module.keys())) + + for key, values in module.items(): + if len(missing_backends): + self._object_missing_backend[key] = missing_backends - self._modules.union(set(item.keys())) - for key, values in item.items(): for value in values: self._class_to_module[value] = key - - if key not in _import_structure: - _import_structure[key] = values - else: - _import_structure[key].update(values) + if len(missing_backends): + self._object_missing_backend[value] = missing_backends + _import_structure.setdefault(key, []).extend(values) # Needed for autocompletion in an IDE - self.__all__.extend(list(item.keys()) + list(chain(*item.values()))) + self.__all__.extend(list(module.keys()) + list(chain(*module.values()))) - if len(lacking_backends): - for module, objects in item.items(): - for obj in objects: - self._object_missing_backend[obj] = lacking_backends - self._object_missing_backend[module] = lacking_backends self.__file__ = module_file self.__spec__ = module_spec @@ -1677,7 +1667,7 @@ def __init__(self, name, module_file, import_structure, module_spec=None, extra_ self._import_structure = _import_structure # This can be removed once every exportable object has a `export()` export. 
- if not PER_BACKEND_SPLIT: + else: self._modules = set(import_structure.keys()) self._class_to_module = {} for key, values in import_structure.items(): @@ -1716,7 +1706,6 @@ class Placeholder(metaclass=DummyObject): def __init__(self, *args, **kwargs): requires_backends(self, missing_backends) - # Placeholder.__class__.__name__ = name Placeholder.__name__ = name Placeholder.__module__ = self.__spec__ @@ -1768,7 +1757,7 @@ def direct_transformers_import(path: str, file="__init__.py") -> ModuleType: def export(*, backends=()): """ - This method enables two things: + This decorator enables two things: - Attaching a `__backends` tuple to an object to see what are the necessary backends for it to execute correctly without instantiating it - The '@export' string is used to dynamically import objects @@ -1951,16 +1940,15 @@ def define_import_structure(module_path): # All objects that are in __all__ should be exported by default. # These objects are exported with the file backends. if '__all__' in file_content: - _all = fetch__all__(file_content) + for _all_object in fetch__all__(file_content): + if _all_object not in registered_objects: - backends = frozenset(base_requirements) - if backends not in module_requirements: - module_requirements[backends] = {} - if module_name not in module_requirements[backends]: - module_requirements[backends][module_name] = set() + backends = frozenset(base_requirements) + if backends not in module_requirements: + module_requirements[backends] = {} + if module_name not in module_requirements[backends]: + module_requirements[backends][module_name] = set() - for _all_object in _all: - if _all_object not in registered_objects: module_requirements[backends][module_name].add(_all_object) import_structure = {**module_requirements, **import_structure} @@ -1968,22 +1956,67 @@ def define_import_structure(module_path): def spread_import_structure(nested_import_structure): - def propagate_tuple(unordered_import_structure): + """ + This method takes as input an unordered import structure and brings the required backends at the top-level, + aggregating modules and objects under their required backends. 
+ + Here's an example of an input import structure at the src.transformers.models level: + + { + 'albert': { + frozenset(): { + 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'} + }, + frozenset({'tokenizers'}): { + 'tokenization_albert_fast': {'AlbertTokenizerFast'} + }, + }, + 'align': { + frozenset(): { + 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, + 'processing_align': {'AlignProcessor'} + }, + }, + 'altclip': { + frozenset(): { + 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, + 'processing_altclip': {'AltCLIPProcessor'}, + } + } + } + + Here's an example of an output import structure at the src.transformers.models level: + + { + frozenset({'tokenizers'}): { + 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'} + }, + frozenset(): { + 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}, + 'align.processing_align': {'AlignProcessor'}, + 'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, + 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, + 'altclip.processing_altclip': {'AltCLIPProcessor'} + } + } + + """ + def propagate_frozenset(unordered_import_structure): tuple_first_import_structure = {} for _key, _value in unordered_import_structure.items(): if not isinstance(_value, dict): tuple_first_import_structure[_key] = _value - elif any(isinstance(v, tuple) for v in _value.keys()): + elif any(isinstance(v, frozenset) for v in _value.keys()): # Here we want to switch around key and v for k, v in _value.items(): - if isinstance(k, tuple): + if isinstance(k, frozenset): if k not in tuple_first_import_structure: tuple_first_import_structure[k] = {} tuple_first_import_structure[k][_key] = v else: - tuple_first_import_structure[_key] = propagate_tuple(_value) + tuple_first_import_structure[_key] = propagate_frozenset(_value) return tuple_first_import_structure @@ -2000,8 +2033,11 @@ def flatten_dict(_dict, previous_key=None): # The tuples contain the necessary backends. We want these first, so we propagate them up the # import structure. ordered_import_structure = nested_import_structure + + # 6 is a number that gives us sufficient depth to go through all files and foreseeable folder depths + # while not taking too long to parse. for i in range(6): - ordered_import_structure = propagate_tuple(ordered_import_structure) + ordered_import_structure = propagate_frozenset(ordered_import_structure) # We then flatten the dict so that it references a module path. flattened_import_structure = {} diff --git a/tests/utils/import_structures/import_structure_raw_register.py b/tests/utils/import_structures/import_structure_raw_register.py index 3f838ccdaa64..47f2ba84f1ef 100644 --- a/tests/utils/import_structures/import_structure_raw_register.py +++ b/tests/utils/import_structures/import_structure_raw_register.py @@ -1,3 +1,17 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + # fmt: off from transformers.utils.import_utils import export diff --git a/tests/utils/import_structures/import_structure_register_with_comments.py b/tests/utils/import_structures/import_structure_register_with_comments.py index e716f0ebca05..18dfd40193c1 100644 --- a/tests/utils/import_structures/import_structure_register_with_comments.py +++ b/tests/utils/import_structures/import_structure_register_with_comments.py @@ -1,3 +1,17 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # fmt: off from transformers.utils.import_utils import export diff --git a/tests/utils/import_structures/import_structure_register_with_duplicates.py b/tests/utils/import_structures/import_structure_register_with_duplicates.py index 509c8f64052b..01842c71a1ff 100644 --- a/tests/utils/import_structures/import_structure_register_with_duplicates.py +++ b/tests/utils/import_structures/import_structure_register_with_duplicates.py @@ -1,3 +1,17 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # fmt: off from transformers.utils.import_utils import export From 1f528f9c7e740c97776fdb856ce497adfc06e4c4 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 25 Jul 2024 15:49:54 +0200 Subject: [PATCH 04/11] Style --- src/transformers/models/align/modeling_align.py | 1 - src/transformers/utils/import_utils.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index dcaa38be5750..dea035618a33 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -20,7 +20,6 @@ import torch import torch.utils.checkpoint -from IPython.terminal.pt_inputhooks import backends from torch import nn from ...activations import ACT2FN diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index e3e65822beff..1807b3750360 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -29,7 +29,6 @@ from types import ModuleType from typing import Any, Optional, Tuple, Union, List -from nltk.downloader import update from packaging import version from . 
import logging @@ -1638,7 +1637,7 @@ def __init__(self, name, module_file, import_structure, module_spec=None, extra_ for backend in backends: if backend not in BACKENDS_MAPPING: raise ValueError( - f"Error: the following backend: '{backend}' was specified around object {item} but isn't specified in the backends mapping." + f"Error: the following backend: '{backend}' was specified around object {module} but isn't specified in the backends mapping." ) callable, error = BACKENDS_MAPPING[backend] if not callable(): From 226c8ec143faa03b44f2bd2e39a2f513bb2e4b00 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 25 Jul 2024 16:12:08 +0200 Subject: [PATCH 05/11] Add comment --- src/transformers/utils/import_utils.py | 52 ++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 1807b3750360..627434688738 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1819,6 +1819,58 @@ def fetch__all__(file_content): @lru_cache() def define_import_structure(module_path): + """ + This method takes the path to a file/a folder and returns the import structure. + If a file is given, it will return the import structure of the parent folder. + + Import structures are designed to be digestible by `_LazyModule` objects. They are + created from the __all__ definitions in each files as well as the `@export` decorators + above methods and objects. + + The import structure allows explicit display of the required backends for a given object. + These backends are specified in two ways: + + 1. Through their `@export`, if they are exported with that decorator. This `@export` decorator + accepts a `backend` tuple kwarg mentioning which backends are required to run this object. + + 2. If an object is defined in a file with "default" backends, it will have, at a minimum, this + backend specified. The default backends are defined according to the filename: + + - If a file is named like `modeling_*.py`, it will have a `torch` backend + - If a file is named like `modeling_tf_*.py`, it will have a `tf` backend + - If a file is named like `modeling_flax_*.py`, it will have a `flax` backend + - If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend + + Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed. + Should an object be imported without its required backends being in the environment, any attempt to use the + object will raise an error mentioning which backend(s) should be added to the environment in order to use + that object. 
+ + Here's an example of an input import structure at the src.transformers.models level: + + { + 'albert': { + frozenset(): { + 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'} + }, + frozenset({'tokenizers'}): { + 'tokenization_albert_fast': {'AlbertTokenizerFast'} + }, + }, + 'align': { + frozenset(): { + 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, + 'processing_align': {'AlignProcessor'} + }, + }, + 'altclip': { + frozenset(): { + 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, + 'processing_altclip': {'AltCLIPProcessor'}, + } + } + } + """ import_structure = {} if os.path.isdir(module_path): for f in os.listdir(module_path): From 2a727f66043847111b5f7abd7c9d5f396ae62269 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Thu, 25 Jul 2024 16:19:33 +0200 Subject: [PATCH 06/11] Clearer .py management --- src/transformers/utils/import_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 627434688738..7e7e2cbb8bb9 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1890,12 +1890,16 @@ def define_import_structure(module_path): module_requirements = {} for module_name in adjacent_modules: + + # Only modules ending in `.py` are accepted here. + if not module_name.endswith('.py'): + continue + with open(os.path.join(directory, module_name)) as f: file_content = f.read() # Remove the .py suffix - if module_name.endswith(".py"): - module_name = module_name[:-3] + module_name = module_name[:-3] previous_line = "" previous_index = 0 From 818b572acc4347dceea7d9bd4ab35a55a91c72d6 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Fri, 26 Jul 2024 14:37:28 +0200 Subject: [PATCH 07/11] Raise if not in backend mapping --- src/transformers/utils/import_utils.py | 9 ++++--- .../utils/import_structures/failing_export.py | 24 +++++++++++++++++++ tests/utils/test_import_structure.py | 4 ++++ 3 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 tests/utils/import_structures/failing_export.py diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 7e7e2cbb8bb9..5e2b659ccd63 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1761,6 +1761,9 @@ def export(*, backends=()): to execute correctly without instantiating it - The '@export' string is used to dynamically import objects """ + for backend in backends: + if backend not in BACKENDS_MAPPING: + raise ValueError(f"Backend should be defined in the BACKENDS_MAPPING. Offending backend: {backend}") if not isinstance(backends, tuple): raise ValueError("Backends should be a tuple.") @@ -1915,7 +1918,7 @@ def define_import_structure(module_path): # Objects that have a `@export` assigned to them will get exported # with the backends specified in the decorator as well as the file backends. 
- registered_objects = set() + exported_objects = set() if '@export' in file_content: lines = file_content.split("\n") for index, line in enumerate(lines): @@ -1986,7 +1989,7 @@ def define_import_structure(module_path): start_index = 6 if line.startswith("class") else 4 object_name = line[start_index:].split("(")[0].strip(":") module_requirements[backends][module_name].add(object_name) - registered_objects.add(object_name) + exported_objects.add(object_name) if not skip_line: previous_line = line @@ -1996,7 +1999,7 @@ def define_import_structure(module_path): # These objects are exported with the file backends. if '__all__' in file_content: for _all_object in fetch__all__(file_content): - if _all_object not in registered_objects: + if _all_object not in exported_objects: backends = frozenset(base_requirements) if backends not in module_requirements: diff --git a/tests/utils/import_structures/failing_export.py b/tests/utils/import_structures/failing_export.py new file mode 100644 index 000000000000..4aef29b76df5 --- /dev/null +++ b/tests/utils/import_structures/failing_export.py @@ -0,0 +1,24 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# fmt: off + +from transformers.utils.import_utils import export + + +@export(backends=("random_item_that_should_not_exist",)) +class A0: + def __init__(self): + pass + diff --git a/tests/utils/test_import_structure.py b/tests/utils/test_import_structure.py index ae28f31b2559..18f4b8400886 100644 --- a/tests/utils/test_import_structure.py +++ b/tests/utils/test_import_structure.py @@ -92,3 +92,7 @@ def test_transformers_specific_model_import(self): f"Defined in __all__: {sorted(_all)}\nDefined with register: {sorted(objects)}" ) self.assertListEqual(sorted(objects), sorted(_all), msg=error_message) + + def test_export_backend_should_be_defined(self): + with self.assertRaisesRegex(ValueError, "Backend should be defined in the BACKENDS_MAPPING"): + pass From c07e817452328f02f870785d3dbdb196640b814f Mon Sep 17 00:00:00 2001 From: Lysandre Date: Fri, 26 Jul 2024 14:54:21 +0200 Subject: [PATCH 08/11] More specific type --- .../models/speech_to_text/__init__.py | 1 + src/transformers/utils/import_utils.py | 42 +++++++++++++++++-- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index 4ad05da69710..fcf5e7eb821e 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -100,5 +100,6 @@ else: import sys + print(type(__spec__)) sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 5e2b659ccd63..5e34ab320a17 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -17,6 +17,7 @@ import importlib.metadata import importlib.util 
+import importlib.machinery import json import os import shutil @@ -27,7 +28,7 @@ from functools import lru_cache from itertools import chain from types import ModuleType -from typing import Any, Optional, Tuple, Union, List +from typing import Any, Tuple, Union, Dict, Set, FrozenSet from packaging import version @@ -1614,6 +1615,9 @@ def is_torch_fx_proxy(x): return False +BACKENDS_T = FrozenSet[str] +IMPORT_STRUCTURE_T = Dict[BACKENDS_T, Dict[str, Set[str]]] + class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. @@ -1621,7 +1625,14 @@ class _LazyModule(ModuleType): # Very heavily inspired by optuna.integration._IntegrationModule # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py - def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + def __init__( + self, + name: str, + module_file: str, + import_structure: IMPORT_STRUCTURE_T, + module_spec: importlib.machinery.ModuleSpec = None, + extra_objects: Dict[str, object] = None + ): super().__init__(name) self._object_missing_backend = {} @@ -1821,7 +1832,7 @@ def fetch__all__(file_content): @lru_cache() -def define_import_structure(module_path): +def create_import_structure_from_path(module_path): """ This method takes the path to a file/a folder and returns the import structure. If a file is given, it will return the import structure of the parent folder. @@ -2106,3 +2117,28 @@ def flatten_dict(_dict, previous_key=None): flattened_import_structure[key] = flatten_dict(value) return flattened_import_structure + + +def define_import_structure(module_path: str) -> IMPORT_STRUCTURE_T: + """ + This method takes a module_path as input and creates an import structure digestible by a _LazyModule. + + Here's an example of an output import structure at the src.transformers.models level: + + { + frozenset({'tokenizers'}): { + 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'} + }, + frozenset(): { + 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}, + 'align.processing_align': {'AlignProcessor'}, + 'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'}, + 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'}, + 'altclip.processing_altclip': {'AltCLIPProcessor'} + } + } + + The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects. 
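+
+    A usage sketch, reusing the example above (the path is hypothetical):
+
+        structure = define_import_structure('src/transformers/models')
+        structure[frozenset({'tokenizers'})]
+        # {'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}}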
+ """ + import_structure = create_import_structure_from_path(module_path) + return spread_import_structure(import_structure) \ No newline at end of file From f92d17c9e6a063c82420cbd90e006098ffa09208 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Fri, 26 Jul 2024 15:44:53 +0200 Subject: [PATCH 09/11] More efficient listdir --- src/transformers/models/speech_to_text/__init__.py | 2 -- src/transformers/utils/import_utils.py | 14 ++++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index fcf5e7eb821e..48c06fd6108b 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -100,6 +100,4 @@ else: import sys - print(type(__spec__)) - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 5e34ab320a17..240001fcfa57 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -1887,14 +1887,20 @@ def create_import_structure_from_path(module_path): """ import_structure = {} if os.path.isdir(module_path): + + directory = module_path + adjacent_modules = [] + for f in os.listdir(module_path): if f != "__pycache__" and os.path.isdir(os.path.join(module_path, f)): - import_structure[f] = define_import_structure(os.path.join(module_path, f)) - directory = module_path + import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f)) + + elif not os.path.isdir(os.path.join(directory, f)): + adjacent_modules.append(f) + else: directory = os.path.dirname(module_path) - - adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))] + adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))] # We're only taking a look at files different from __init__.py # We could theoretically export things directly from the __init__.py From 3967eaa39e918100fd2ae9a44906bd502dfff25d Mon Sep 17 00:00:00 2001 From: Lysandre Date: Fri, 6 Sep 2024 11:59:17 +0200 Subject: [PATCH 10/11] Misc fixes --- src/transformers/__init__.py | 3 - .../models/align/processing_align.py | 2 + .../models/speech_to_text/__init__.py | 1 + src/transformers/utils/dummy_pt_objects.py | 189 +----------------- .../utils/dummy_sentencepiece_objects.py | 4 +- src/transformers/utils/dummy_tf_objects.py | 35 ---- src/transformers/utils/import_utils.py | 35 ++-- .../longformer/test_modeling_longformer.py | 2 +- .../longformer/test_modeling_tf_longformer.py | 2 +- .../models/reformer/test_modeling_reformer.py | 2 +- .../utils/import_structures/failing_export.py | 1 - utils/custom_init_isort.py | 2 +- 12 files changed, 32 insertions(+), 246 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 1039afcdaf33..faed0cea81d8 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -3895,7 +3895,6 @@ "TFConvBertForQuestionAnswering", "TFConvBertForSequenceClassification", "TFConvBertForTokenClassification", - "TFConvBertLayer", "TFConvBertModel", "TFConvBertPreTrainedModel", ] @@ -4226,7 +4225,6 @@ "TFRemBertForQuestionAnswering", "TFRemBertForSequenceClassification", "TFRemBertForTokenClassification", - "TFRemBertLayer", "TFRemBertModel", "TFRemBertPreTrainedModel", ] @@ -4272,7 +4270,6 @@ 
"TFRoFormerForQuestionAnswering", "TFRoFormerForSequenceClassification", "TFRoFormerForTokenClassification", - "TFRoFormerLayer", "TFRoFormerModel", "TFRoFormerPreTrainedModel", ] diff --git a/src/transformers/models/align/processing_align.py b/src/transformers/models/align/processing_align.py index 923daee965fb..a5846a87d236 100644 --- a/src/transformers/models/align/processing_align.py +++ b/src/transformers/models/align/processing_align.py @@ -16,6 +16,8 @@ Image/Text processor class for ALIGN """ +from typing import List, Union + try: from typing import Unpack diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index 48c06fd6108b..4ad05da69710 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -100,4 +100,5 @@ else: import sys + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index ddf7608155e8..5c84f97319ec 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1262,13 +1262,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class BertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class BertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] @@ -1368,13 +1361,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class BigBirdLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class BigBirdModel(metaclass=DummyObject): _backends = ["torch"] @@ -1862,13 +1848,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class CanineLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class CanineModel(metaclass=DummyObject): _backends = ["torch"] @@ -2230,13 +2209,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ConvBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ConvBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -3144,13 +3116,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class QDQBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class QDQBertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] @@ -4133,13 +4098,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class FNetLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class FNetModel(metaclass=DummyObject): _backends = ["torch"] @@ -4572,13 +4530,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTNeoXLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class GPTNeoXModel(metaclass=DummyObject): _backends = ["torch"] @@ -4600,13 +4551,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class GPTNeoXJapaneseLayer(metaclass=DummyObject): - 
_backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class GPTNeoXJapaneseModel(metaclass=DummyObject): _backends = ["torch"] @@ -5437,13 +5381,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class LongformerSelfAttention(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] @@ -5584,13 +5521,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class LxmertXLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] @@ -5675,6 +5605,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class MarianPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] @@ -6011,13 +5948,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MobileBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MobileBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -6184,13 +6114,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class MPNetLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class MPNetModel(metaclass=DummyObject): _backends = ["torch"] @@ -6562,13 +6485,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class NystromformerLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class NystromformerModel(metaclass=DummyObject): _backends = ["torch"] @@ -6993,13 +6909,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class PerceiverLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class PerceiverModel(metaclass=DummyObject): _backends = ["torch"] @@ -7469,13 +7378,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ReformerAttention(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ReformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] @@ -7497,13 +7399,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ReformerLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ReformerModel(metaclass=DummyObject): _backends = ["torch"] @@ -7588,13 +7483,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RemBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class RemBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -7802,13 +7690,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RoCBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, 
*args, **kwargs): - requires_backends(self, ["torch"]) - - class RoCBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -7869,13 +7750,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class RoFormerLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class RoFormerModel(metaclass=DummyObject): _backends = ["torch"] @@ -8097,13 +7971,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class SegformerLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class SegformerModel(metaclass=DummyObject): _backends = ["torch"] @@ -8314,13 +8181,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class SplinterLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class SplinterModel(metaclass=DummyObject): _backends = ["torch"] @@ -8377,13 +8237,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class SqueezeBertModule(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class SqueezeBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] @@ -9092,13 +8945,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ViltLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ViltModel(metaclass=DummyObject): _backends = ["torch"] @@ -9176,13 +9022,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class VisualBertLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class VisualBertModel(metaclass=DummyObject): _backends = ["torch"] @@ -9232,13 +9071,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class ViTMAELayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class ViTMAEModel(metaclass=DummyObject): _backends = ["torch"] @@ -9957,13 +9789,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) -class YosoLayer(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - class YosoModel(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/transformers/utils/dummy_sentencepiece_objects.py b/src/transformers/utils/dummy_sentencepiece_objects.py index 8977b4f51b63..7931e0fe6584 100644 --- a/src/transformers/utils/dummy_sentencepiece_objects.py +++ b/src/transformers/utils/dummy_sentencepiece_objects.py @@ -128,14 +128,14 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) -class MBart50Tokenizer(metaclass=DummyObject): +class MBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) -class MBartTokenizer(metaclass=DummyObject): +class MBart50Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 942a7afced4b..6e1674c9173e 100644 --- 
a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -478,13 +478,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -class TFBertEmbeddings(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -772,13 +765,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -class TFConvBertLayer(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFConvBertModel(metaclass=DummyObject): _backends = ["tf"] @@ -1717,13 +1703,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -class TFLongformerSelfAttention(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFLxmertForPreTraining(metaclass=DummyObject): _backends = ["tf"] @@ -2179,13 +2158,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -class TFRemBertLayer(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFRemBertModel(metaclass=DummyObject): _backends = ["tf"] @@ -2389,13 +2361,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) -class TFRoFormerLayer(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFRoFormerModel(metaclass=DummyObject): _backends = ["tf"] diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 240001fcfa57..092e43b489f4 100755 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -15,9 +15,9 @@ Import utilities: Utilities related to imports and our lazy inits. """ +import importlib.machinery import importlib.metadata import importlib.util -import importlib.machinery import json import os import shutil @@ -28,7 +28,7 @@ from functools import lru_cache from itertools import chain from types import ModuleType -from typing import Any, Tuple, Union, Dict, Set, FrozenSet +from typing import Any, Dict, FrozenSet, Optional, Set, Tuple, Union from packaging import version @@ -1618,6 +1618,7 @@ def is_torch_fx_proxy(x): BACKENDS_T = FrozenSet[str] IMPORT_STRUCTURE_T = Dict[BACKENDS_T, Dict[str, Set[str]]] + class _LazyModule(ModuleType): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. 
@@ -1631,7 +1632,7 @@ def __init__( module_file: str, import_structure: IMPORT_STRUCTURE_T, module_spec: importlib.machinery.ModuleSpec = None, - extra_objects: Dict[str, object] = None + extra_objects: Dict[str, object] = None, ): super().__init__(name) @@ -1653,7 +1654,7 @@ def __init__( callable, error = BACKENDS_MAPPING[backend] if not callable(): missing_backends.append(backend) - self._modules.union(set(module.keys())) + self._modules = self._modules.union(set(module.keys())) for key, values in module.items(): if len(missing_backends): @@ -1668,7 +1669,6 @@ def __init__( # Needed for autocompletion in an IDE self.__all__.extend(list(module.keys()) + list(chain(*module.values()))) - self.__file__ = module_file self.__spec__ = module_spec self.__path__ = [os.path.dirname(module_file)] @@ -1787,10 +1787,10 @@ def inner_fn(fun): BASE_FILE_REQUIREMENTS = { - lambda e: 'modeling_tf_' in e: ('tf',), - lambda e: 'modeling_flax_' in e: ('flax',), - lambda e: 'modeling_' in e: ('torch',), - lambda e: e.startswith('tokenization_') and e.endswith('_fast'): ('tokenizers',), + lambda e: "modeling_tf_" in e: ("tf",), + lambda e: "modeling_flax_" in e: ("flax",), + lambda e: "modeling_" in e: ("torch",), + lambda e: e.startswith("tokenization_") and e.endswith("_fast"): ("tokenizers",), } @@ -1800,7 +1800,7 @@ def fetch__all__(file_content): Returns None if not defined, otherwise returns a list of strings. """ - if '__all__' not in file_content: + if "__all__" not in file_content: return [] lines = file_content.splitlines() @@ -1810,7 +1810,7 @@ def fetch__all__(file_content): lines = lines[start_index:] - if not lines[0].startswith('__all__'): + if not lines[0].startswith("__all__"): raise ValueError( "fetch__all__ accepts a list of lines, with the first line being the __all__ variable declaration" ) @@ -1887,7 +1887,6 @@ def create_import_structure_from_path(module_path): """ import_structure = {} if os.path.isdir(module_path): - directory = module_path adjacent_modules = [] @@ -1910,9 +1909,8 @@ def create_import_structure_from_path(module_path): module_requirements = {} for module_name in adjacent_modules: - # Only modules ending in `.py` are accepted here. - if not module_name.endswith('.py'): + if not module_name.endswith(".py"): continue with open(os.path.join(directory, module_name)) as f: @@ -1936,10 +1934,9 @@ def create_import_structure_from_path(module_path): # Objects that have a `@export` assigned to them will get exported # with the backends specified in the decorator as well as the file backends. exported_objects = set() - if '@export' in file_content: + if "@export" in file_content: lines = file_content.split("\n") for index, line in enumerate(lines): - # This allows exporting items with other decorators. We'll take a look # at the line that follows at the same indentation level. if line.startswith((" ", "\t", "@", ")")) and not line.startswith("@export"): @@ -2014,10 +2011,9 @@ def create_import_structure_from_path(module_path): # All objects that are in __all__ should be exported by default. # These objects are exported with the file backends. 
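+        # e.g. `__all__ = ["AlbertConfig", "AlbertOnnxConfig"]` in `configuration_albert.py`
+        # exports both names under frozenset(), since no filename pattern adds a backend.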
- if '__all__' in file_content: + if "__all__" in file_content: for _all_object in fetch__all__(file_content): if _all_object not in exported_objects: - backends = frozenset(base_requirements) if backends not in module_requirements: module_requirements[backends] = {} @@ -2076,6 +2072,7 @@ def spread_import_structure(nested_import_structure): } """ + def propagate_frozenset(unordered_import_structure): tuple_first_import_structure = {} for _key, _value in unordered_import_structure.items(): @@ -2147,4 +2144,4 @@ def define_import_structure(module_path: str) -> IMPORT_STRUCTURE_T: The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects. """ import_structure = create_import_structure_from_path(module_path) - return spread_import_structure(import_structure) \ No newline at end of file + return spread_import_structure(import_structure) diff --git a/tests/models/longformer/test_modeling_longformer.py b/tests/models/longformer/test_modeling_longformer.py index ef133142573d..e7f2f67cc232 100644 --- a/tests/models/longformer/test_modeling_longformer.py +++ b/tests/models/longformer/test_modeling_longformer.py @@ -34,8 +34,8 @@ LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, - LongformerSelfAttention, ) + from transformers.models.longformer.modeling_longformer import LongformerSelfAttention class LongformerModelTester: diff --git a/tests/models/longformer/test_modeling_tf_longformer.py b/tests/models/longformer/test_modeling_tf_longformer.py index 0eda06522681..131c07765345 100644 --- a/tests/models/longformer/test_modeling_tf_longformer.py +++ b/tests/models/longformer/test_modeling_tf_longformer.py @@ -37,8 +37,8 @@ TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, - TFLongformerSelfAttention, ) + from transformers.models.longformer.modeling_tf_longformer import TFLongformerSelfAttention from transformers.tf_utils import shape_list diff --git a/tests/models/reformer/test_modeling_reformer.py b/tests/models/reformer/test_modeling_reformer.py index 152c4f2ba33f..ba0a9232847a 100644 --- a/tests/models/reformer/test_modeling_reformer.py +++ b/tests/models/reformer/test_modeling_reformer.py @@ -40,11 +40,11 @@ ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, - ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerTokenizer, ) + from transformers.models.reformer.modeling_reformer import ReformerLayer class ReformerModelTester: diff --git a/tests/utils/import_structures/failing_export.py b/tests/utils/import_structures/failing_export.py index 4aef29b76df5..d635619b6075 100644 --- a/tests/utils/import_structures/failing_export.py +++ b/tests/utils/import_structures/failing_export.py @@ -21,4 +21,3 @@ class A0: def __init__(self): pass - diff --git a/utils/custom_init_isort.py b/utils/custom_init_isort.py index 7adf804eaf1f..82bf07ce43a9 100644 --- a/utils/custom_init_isort.py +++ b/utils/custom_init_isort.py @@ -244,7 +244,7 @@ def sort_imports(file: str, check_only: bool = True): code = f.read() # If the file is not a custom init, there is nothing to do. 
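+    # New-style inits built with `define_import_structure` have no manual
+    # `_import_structure` blocks left to sort, so they are skipped as well.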
- if "_import_structure" not in code: + if "_import_structure" not in code or "define_import_structure" in code: return # Blocks of indent level 0 From e3665de463aca8c4904ab9e88d5195623f742c3f Mon Sep 17 00:00:00 2001 From: Lysandre Date: Fri, 6 Sep 2024 14:52:31 +0200 Subject: [PATCH 11/11] Rest of the model refactors --- .circleci/config.yml | 2 +- Makefile | 2 +- .../audio_spectrogram_transformer/__init__.py | 45 +---- ...iguration_audio_spectrogram_transformer.py | 3 + ...xtraction_audio_spectrogram_transformer.py | 3 + .../modeling_audio_spectrogram_transformer.py | 3 + .../models/autoformer/__init__.py | 42 +--- .../autoformer/configuration_autoformer.py | 3 + .../models/autoformer/modeling_autoformer.py | 3 + src/transformers/models/bark/__init__.py | 61 +----- .../models/bark/configuration_bark.py | 3 + src/transformers/models/bark/modeling_bark.py | 12 ++ .../models/bark/processing_bark.py | 3 + src/transformers/models/bart/__init__.py | 135 +------------ .../models/bart/configuration_bart.py | 5 +- src/transformers/models/bart/modeling_bart.py | 13 ++ .../models/bart/modeling_flax_bart.py | 10 + .../models/bart/modeling_tf_bart.py | 9 + .../models/bart/tokenization_bart.py | 3 + .../models/bart/tokenization_bart_fast.py | 3 + src/transformers/models/barthez/__init__.py | 44 +---- .../models/barthez/tokenization_barthez.py | 5 + .../barthez/tokenization_barthez_fast.py | 3 + src/transformers/models/bartpho/__init__.py | 26 +-- .../models/bartpho/tokenization_bartpho.py | 5 + src/transformers/models/beit/__init__.py | 98 +--------- .../models/beit/configuration_beit.py | 3 + .../models/beit/feature_extraction_beit.py | 5 + .../models/beit/image_processing_beit.py | 5 + src/transformers/models/beit/modeling_beit.py | 10 + .../models/beit/modeling_flax_beit.py | 7 + src/transformers/models/bert/__init__.py | 183 ++---------------- .../models/bert/configuration_bert.py | 3 + src/transformers/models/bert/modeling_bert.py | 15 ++ .../models/bert/modeling_flax_bert.py | 13 ++ .../models/bert/modeling_tf_bert.py | 15 ++ .../models/bert/tokenization_bert.py | 3 + .../models/bert/tokenization_bert_fast.py | 3 + .../models/bert/tokenization_bert_tf.py | 5 + .../models/bert_generation/__init__.py | 57 +----- .../configuration_bert_generation.py | 3 + .../modeling_bert_generation.py | 8 + .../tokenization_bert_generation.py | 5 + .../models/bert_japanese/__init__.py | 11 +- .../tokenization_bert_japanese.py | 3 + src/transformers/models/bertweet/__init__.py | 11 +- .../models/bertweet/tokenization_bertweet.py | 2 + src/transformers/models/big_bird/__init__.py | 133 +------------ .../models/big_bird/configuration_big_bird.py | 3 + .../models/big_bird/modeling_big_bird.py | 14 ++ .../models/big_bird/modeling_flax_big_bird.py | 12 ++ .../models/big_bird/tokenization_big_bird.py | 5 + .../big_bird/tokenization_big_bird_fast.py | 3 + .../models/bigbird_pegasus/__init__.py | 52 +---- .../configuration_bigbird_pegasus.py | 5 +- .../modeling_bigbird_pegasus.py | 10 + src/transformers/models/biogpt/__init__.py | 47 +---- .../models/biogpt/configuration_biogpt.py | 3 + .../models/biogpt/modeling_biogpt.py | 9 + .../models/biogpt/tokenization_biogpt.py | 3 + src/transformers/models/bit/__init__.py | 57 +----- .../models/bit/configuration_bit.py | 3 + .../models/bit/image_processing_bit.py | 5 + src/transformers/models/bit/modeling_bit.py | 3 + .../models/blenderbot/__init__.py | 127 +----------- .../blenderbot/configuration_blenderbot.py | 5 +- .../models/blenderbot/modeling_blenderbot.py | 8 + 
.../blenderbot/modeling_flax_blenderbot.py | 2 + .../blenderbot/modeling_tf_blenderbot.py | 8 + .../blenderbot/tokenization_blenderbot.py | 3 + .../tokenization_blenderbot_fast.py | 3 + .../models/blenderbot_small/__init__.py | 123 +----------- .../configuration_blenderbot_small.py | 5 +- .../modeling_blenderbot_small.py | 8 + .../modeling_flax_blenderbot_small.py | 6 + .../modeling_tf_blenderbot_small.py | 8 + .../tokenization_blenderbot_small.py | 3 + .../tokenization_blenderbot_small_fast.py | 3 + src/transformers/models/blip/__init__.py | 112 ++--------- .../models/blip/configuration_blip.py | 3 + .../models/blip/image_processing_blip.py | 5 + src/transformers/models/blip/modeling_blip.py | 10 + .../models/blip/modeling_blip_text.py | 5 + .../models/blip/modeling_tf_blip.py | 11 ++ .../models/blip/modeling_tf_blip_text.py | 5 + .../models/blip/processing_blip.py | 3 + src/transformers/models/blip_2/__init__.py | 59 +----- .../models/blip_2/configuration_blip_2.py | 3 + .../models/blip_2/modeling_blip_2.py | 12 ++ .../models/blip_2/processing_blip_2.py | 3 + src/transformers/models/bloom/__init__.py | 88 +-------- .../models/bloom/configuration_bloom.py | 5 +- .../models/bloom/modeling_bloom.py | 10 + .../models/bloom/modeling_flax_bloom.py | 2 + .../models/bloom/tokenization_bloom_fast.py | 3 + .../models/bridgetower/__init__.py | 72 +------ .../bridgetower/configuration_bridgetower.py | 3 + .../image_processing_bridgetower.py | 5 + .../bridgetower/modeling_bridgetower.py | 9 + .../bridgetower/processing_bridgetower.py | 3 + src/transformers/models/bros/__init__.py | 61 +----- .../models/bros/configuration_bros.py | 3 + src/transformers/models/bros/modeling_bros.py | 9 + .../models/bros/processing_bros.py | 3 + src/transformers/models/byt5/__init__.py | 10 +- .../models/byt5/tokenization_byt5.py | 3 + src/transformers/models/camembert/__init__.py | 126 +----------- .../camembert/configuration_camembert.py | 3 + .../models/camembert/modeling_camembert.py | 12 ++ .../models/camembert/modeling_tf_camembert.py | 13 ++ .../camembert/tokenization_camembert.py | 5 + .../camembert/tokenization_camembert_fast.py | 3 + src/transformers/models/canine/__init__.py | 53 +---- .../models/canine/configuration_canine.py | 3 + .../models/canine/modeling_canine.py | 11 ++ .../models/canine/tokenization_canine.py | 3 + .../models/chinese_clip/__init__.py | 72 +------ .../configuration_chinese_clip.py | 3 + .../feature_extraction_chinese_clip.py | 5 + .../image_processing_chinese_clip.py | 5 + .../chinese_clip/modeling_chinese_clip.py | 3 + .../chinese_clip/processing_chinese_clip.py | 3 + src/transformers/models/clap/__init__.py | 59 +----- .../models/clap/configuration_clap.py | 3 + .../models/clap/feature_extraction_clap.py | 3 + src/transformers/models/clap/modeling_clap.py | 10 + .../models/clap/processing_clap.py | 3 + src/transformers/models/clip/__init__.py | 169 ++-------------- .../models/clip/configuration_clip.py | 3 + .../models/clip/feature_extraction_clip.py | 5 + .../models/clip/image_processing_clip.py | 5 + src/transformers/models/clip/modeling_clip.py | 11 ++ .../models/clip/modeling_flax_clip.py | 10 + .../models/clip/modeling_tf_clip.py | 11 ++ .../models/clip/processing_clip.py | 3 + .../models/clip/tokenization_clip.py | 3 + .../models/clip/tokenization_clip_fast.py | 3 + src/transformers/models/clipseg/__init__.py | 53 +---- .../models/clipseg/configuration_clipseg.py | 3 + .../models/clipseg/modeling_clipseg.py | 9 + .../models/clipseg/processing_clipseg.py | 3 + 
src/transformers/models/clvp/__init__.py | 67 +------ .../models/clvp/configuration_clvp.py | 3 + .../models/clvp/feature_extraction_clvp.py | 3 + src/transformers/models/clvp/modeling_clvp.py | 10 + .../models/clvp/processing_clvp.py | 3 + .../models/clvp/tokenization_clvp.py | 3 + .../models/code_llama/__init__.py | 42 +--- .../code_llama/tokenization_code_llama.py | 5 + .../tokenization_code_llama_fast.py | 3 + src/transformers/models/codegen/__init__.py | 58 +----- .../models/codegen/configuration_codegen.py | 6 +- .../models/codegen/modeling_codegen.py | 3 + .../models/codegen/tokenization_codegen.py | 3 + .../codegen/tokenization_codegen_fast.py | 3 + src/transformers/models/cohere/__init__.py | 63 +----- .../models/cohere/configuration_cohere.py | 3 + .../models/cohere/modeling_cohere.py | 3 + .../models/cohere/tokenization_cohere_fast.py | 3 + .../models/conditional_detr/__init__.py | 68 +------ .../configuration_conditional_detr.py | 3 + .../feature_extraction_conditional_detr.py | 5 + .../image_processing_conditional_detr.py | 5 + .../modeling_conditional_detr.py | 8 + src/transformers/models/convbert/__init__.py | 114 +---------- .../models/convbert/configuration_convbert.py | 3 + .../models/convbert/modeling_convbert.py | 12 ++ .../models/convbert/modeling_tf_convbert.py | 12 ++ .../models/convbert/tokenization_convbert.py | 3 + .../convbert/tokenization_convbert_fast.py | 3 + src/transformers/models/convnext/__init__.py | 86 +------- .../models/convnext/configuration_convnext.py | 3 + .../convnext/feature_extraction_convnext.py | 5 + .../convnext/image_processing_convnext.py | 5 + .../models/convnext/modeling_convnext.py | 3 + .../models/convnext/modeling_tf_convnext.py | 3 + .../models/convnextv2/__init__.py | 85 +------- .../convnextv2/configuration_convnextv2.py | 3 + .../models/convnextv2/modeling_convnextv2.py | 3 + .../convnextv2/modeling_tf_convnextv2.py | 8 + src/transformers/models/cpm/__init__.py | 44 +---- .../models/cpm/tokenization_cpm.py | 5 + .../models/cpm/tokenization_cpm_fast.py | 3 + src/transformers/models/cpmant/__init__.py | 58 +----- .../models/cpmant/configuration_cpmant.py | 3 + .../models/cpmant/modeling_cpmant.py | 3 + .../models/cpmant/tokenization_cpmant.py | 3 + src/transformers/models/ctrl/__init__.py | 72 +------ .../models/ctrl/configuration_ctrl.py | 3 + src/transformers/models/ctrl/modeling_ctrl.py | 3 + .../models/ctrl/modeling_tf_ctrl.py | 9 + .../models/ctrl/tokenization_ctrl.py | 3 + src/transformers/models/cvt/__init__.py | 63 +----- .../models/cvt/configuration_cvt.py | 3 + src/transformers/models/cvt/modeling_cvt.py | 3 + .../models/cvt/modeling_tf_cvt.py | 3 + src/transformers/models/data2vec/__init__.py | 115 ++--------- .../data2vec/configuration_data2vec_audio.py | 3 + .../data2vec/configuration_data2vec_text.py | 3 + .../data2vec/configuration_data2vec_vision.py | 3 + .../data2vec/modeling_data2vec_audio.py | 10 + .../models/data2vec/modeling_data2vec_text.py | 12 ++ .../data2vec/modeling_data2vec_vision.py | 8 + .../data2vec/modeling_tf_data2vec_vision.py | 9 + src/transformers/models/dbrx/__init__.py | 36 +--- .../models/dbrx/configuration_dbrx.py | 3 + src/transformers/models/dbrx/modeling_dbrx.py | 3 + src/transformers/models/deberta/__init__.py | 104 +--------- .../models/deberta/configuration_deberta.py | 6 +- .../models/deberta/modeling_deberta.py | 10 + .../models/deberta/modeling_tf_deberta.py | 12 +- .../models/deberta/tokenization_deberta.py | 3 + .../deberta/tokenization_deberta_fast.py | 3 + 
.../models/deberta_v2/__init__.py | 110 +---------- .../deberta_v2/configuration_deberta_v2.py | 6 +- .../models/deberta_v2/modeling_deberta_v2.py | 11 ++ .../deberta_v2/modeling_tf_deberta_v2.py | 12 ++ .../deberta_v2/tokenization_deberta_v2.py | 5 + .../tokenization_deberta_v2_fast.py | 3 + .../models/decision_transformer/__init__.py | 44 +---- .../configuration_decision_transformer.py | 3 + .../modeling_decision_transformer.py | 8 + .../models/deformable_detr/__init__.py | 60 +----- .../configuration_deformable_detr.py | 3 + .../feature_extraction_deformable_detr.py | 5 + .../image_processing_deformable_detr.py | 5 + .../modeling_deformable_detr.py | 3 + src/transformers/models/deit/__init__.py | 97 +--------- .../models/deit/configuration_deit.py | 3 + .../models/deit/feature_extraction_deit.py | 5 + .../models/deit/image_processing_deit.py | 5 + src/transformers/models/deit/modeling_deit.py | 9 + .../models/deit/modeling_tf_deit.py | 10 + .../models/deprecated/deta/__init__.py | 57 +----- .../deprecated/deta/configuration_deta.py | 3 + .../deprecated/deta/image_processing_deta.py | 5 + .../models/deprecated/deta/modeling_deta.py | 3 + .../deprecated/efficientformer/__init__.py | 79 +------- .../configuration_efficientformer.py | 3 + .../image_processing_efficientformer.py | 5 + .../modeling_efficientformer.py | 8 + .../modeling_tf_efficientformer.py | 9 + .../models/deprecated/ernie_m/__init__.py | 63 +----- .../ernie_m/configuration_ernie_m.py | 3 + .../deprecated/ernie_m/modeling_ernie_m.py | 12 ++ .../ernie_m/tokenization_ernie_m.py | 5 + .../deprecated/gptsan_japanese/__init__.py | 46 +---- .../configuration_gptsan_japanese.py | 3 + .../modeling_gptsan_japanese.py | 3 + .../tokenization_gptsan_japanese.py | 3 + .../models/deprecated/graphormer/__init__.py | 40 +--- .../graphormer/configuration_graphormer.py | 3 + .../graphormer/modeling_graphormer.py | 3 + .../models/deprecated/jukebox/__init__.py | 52 +---- .../jukebox/configuration_jukebox.py | 3 + .../deprecated/jukebox/modeling_jukebox.py | 3 + .../jukebox/tokenization_jukebox.py | 3 + .../models/deprecated/mega/__init__.py | 53 +---- .../deprecated/mega/configuration_mega.py | 3 + .../models/deprecated/mega/modeling_mega.py | 12 ++ .../models/deprecated/nat/__init__.py | 39 +--- .../deprecated/nat/configuration_nat.py | 3 + .../models/deprecated/nat/modeling_nat.py | 3 + .../models/deprecated/nezha/__init__.py | 52 +---- .../deprecated/nezha/configuration_nezha.py | 5 +- .../models/deprecated/nezha/modeling_nezha.py | 13 ++ .../models/deprecated/qdqbert/__init__.py | 54 +----- .../qdqbert/configuration_qdqbert.py | 3 + .../deprecated/qdqbert/modeling_qdqbert.py | 16 ++ .../models/deprecated/realm/__init__.py | 71 +------ .../deprecated/realm/configuration_realm.py | 3 + .../models/deprecated/realm/modeling_realm.py | 11 ++ .../deprecated/realm/retrieval_realm.py | 3 + .../deprecated/realm/tokenization_realm.py | 3 + .../realm/tokenization_realm_fast.py | 3 + .../deprecated/speech_to_text_2/__init__.py | 50 +---- .../configuration_speech_to_text_2.py | 3 + .../modeling_speech_to_text_2.py | 3 + .../processing_speech_to_text_2.py | 3 + .../tokenization_speech_to_text_2.py | 3 + .../models/deprecated/tvlt/__init__.py | 84 +------- .../deprecated/tvlt/configuration_tvlt.py | 3 + .../tvlt/feature_extraction_tvlt.py | 3 + .../deprecated/tvlt/image_processing_tvlt.py | 5 + .../models/deprecated/tvlt/modeling_tvlt.py | 3 + .../models/deprecated/tvlt/processing_tvlt.py | 3 + .../models/deprecated/vit_hybrid/__init__.py | 55 +----- 
.../vit_hybrid/configuration_vit_hybrid.py | 3 + .../vit_hybrid/image_processing_vit_hybrid.py | 5 + .../vit_hybrid/modeling_vit_hybrid.py | 3 + .../deprecated/xlm_prophetnet/__init__.py | 62 +----- .../configuration_xlm_prophetnet.py | 3 + .../xlm_prophetnet/modeling_xlm_prophetnet.py | 10 + .../tokenization_xlm_prophetnet.py | 5 + .../models/depth_anything/__init__.py | 37 +--- .../configuration_depth_anything.py | 3 + .../depth_anything/modeling_depth_anything.py | 3 + src/transformers/models/detr/__init__.py | 60 +----- .../models/detr/configuration_detr.py | 3 + .../models/detr/feature_extraction_detr.py | 5 + .../models/detr/image_processing_detr.py | 5 + src/transformers/models/detr/modeling_detr.py | 3 + src/transformers/models/dinat/__init__.py | 39 +--- .../models/dinat/configuration_dinat.py | 3 + .../models/dinat/modeling_dinat.py | 3 + src/transformers/models/dinov2/__init__.py | 68 +------ .../models/dinov2/configuration_dinov2.py | 3 + .../models/dinov2/modeling_dinov2.py | 3 + .../models/distilbert/__init__.py | 149 +------------- .../distilbert/configuration_distilbert.py | 3 + .../models/distilbert/modeling_distilbert.py | 13 ++ .../distilbert/modeling_flax_distilbert.py | 10 + .../distilbert/modeling_tf_distilbert.py | 14 ++ .../distilbert/tokenization_distilbert.py | 3 + .../tokenization_distilbert_fast.py | 3 + src/transformers/models/donut/__init__.py | 60 +----- .../models/donut/configuration_donut_swin.py | 3 + .../models/donut/feature_extraction_donut.py | 5 + .../models/donut/image_processing_donut.py | 6 +- .../models/donut/modeling_donut_swin.py | 3 + .../models/donut/processing_donut.py | 3 + src/transformers/models/dpr/__init__.py | 124 +----------- .../models/dpr/configuration_dpr.py | 3 + src/transformers/models/dpr/modeling_dpr.py | 12 ++ .../models/dpr/modeling_tf_dpr.py | 10 + .../models/dpr/tokenization_dpr.py | 7 + .../models/dpr/tokenization_dpr_fast.py | 7 + src/transformers/models/dpt/__init__.py | 61 +----- .../models/dpt/configuration_dpt.py | 3 + .../models/dpt/feature_extraction_dpt.py | 5 + .../models/dpt/image_processing_dpt.py | 5 + src/transformers/models/dpt/modeling_dpt.py | 3 + .../models/efficientnet/__init__.py | 76 +------- .../configuration_efficientnet.py | 3 + .../image_processing_efficientnet.py | 5 + .../efficientnet/modeling_efficientnet.py | 3 + src/transformers/models/electra/__init__.py | 153 +-------------- .../models/electra/configuration_electra.py | 3 + .../models/electra/modeling_electra.py | 14 ++ .../models/electra/modeling_flax_electra.py | 12 ++ .../models/electra/modeling_tf_electra.py | 13 ++ .../models/electra/tokenization_electra.py | 3 + .../electra/tokenization_electra_fast.py | 3 + src/transformers/models/encodec/__init__.py | 45 +---- .../models/encodec/configuration_encodec.py | 3 + .../encodec/feature_extraction_encodec.py | 3 + .../models/encodec/modeling_encodec.py | 3 + .../models/encoder_decoder/__init__.py | 69 +------ .../configuration_encoder_decoder.py | 3 + .../modeling_encoder_decoder.py | 3 + .../modeling_flax_encoder_decoder.py | 3 + .../modeling_tf_encoder_decoder.py | 3 + src/transformers/models/ernie/__init__.py | 53 +---- .../models/ernie/configuration_ernie.py | 3 + .../models/ernie/modeling_ernie.py | 14 ++ src/transformers/models/esm/__init__.py | 78 +------- .../models/esm/configuration_esm.py | 3 + src/transformers/models/esm/modeling_esm.py | 9 + .../models/esm/modeling_esmfold.py | 3 + .../models/esm/modeling_tf_esm.py | 10 + .../models/esm/tokenization_esm.py | 3 + 
src/transformers/models/falcon/__init__.py | 50 +---- .../models/falcon/configuration_falcon.py | 3 + .../models/falcon/modeling_falcon.py | 10 + .../models/fastspeech2_conformer/__init__.py | 55 +----- .../configuration_fastspeech2_conformer.py | 3 + .../modeling_fastspeech2_conformer.py | 8 + .../tokenization_fastspeech2_conformer.py | 3 + src/transformers/models/flaubert/__init__.py | 86 +------- .../models/flaubert/configuration_flaubert.py | 3 + .../models/flaubert/modeling_flaubert.py | 14 ++ .../models/flaubert/modeling_tf_flaubert.py | 12 ++ .../models/flaubert/tokenization_flaubert.py | 3 + src/transformers/models/flava/__init__.py | 81 +------- .../models/flava/configuration_flava.py | 3 + .../models/flava/feature_extraction_flava.py | 5 + .../models/flava/image_processing_flava.py | 5 + .../models/flava/modeling_flava.py | 11 ++ .../models/flava/processing_flava.py | 5 + src/transformers/models/fnet/__init__.py | 92 +-------- .../models/fnet/configuration_fnet.py | 3 + src/transformers/models/fnet/modeling_fnet.py | 13 ++ .../models/fnet/tokenization_fnet.py | 5 + .../models/fnet/tokenization_fnet_fast.py | 3 + src/transformers/models/focalnet/__init__.py | 42 +--- .../models/focalnet/configuration_focalnet.py | 3 + .../models/focalnet/modeling_focalnet.py | 9 + src/transformers/models/fsmt/__init__.py | 35 +--- .../models/fsmt/configuration_fsmt.py | 3 + src/transformers/models/fsmt/modeling_fsmt.py | 3 + .../models/fsmt/tokenization_fsmt.py | 3 + src/transformers/models/funnel/__init__.py | 118 +---------- .../models/funnel/configuration_funnel.py | 3 + .../models/funnel/modeling_funnel.py | 14 ++ .../models/funnel/modeling_tf_funnel.py | 13 ++ .../models/funnel/tokenization_funnel.py | 3 + .../models/funnel/tokenization_funnel_fast.py | 3 + src/transformers/models/fuyu/__init__.py | 60 +----- .../models/fuyu/configuration_fuyu.py | 3 + .../models/fuyu/image_processing_fuyu.py | 5 + src/transformers/models/fuyu/modeling_fuyu.py | 3 + .../models/fuyu/processing_fuyu.py | 5 + src/transformers/models/gemma/__init__.py | 111 +---------- .../models/gemma/configuration_gemma.py | 3 + .../models/gemma/modeling_flax_gemma.py | 2 + .../models/gemma/modeling_gemma.py | 11 ++ .../models/gemma/tokenization_gemma.py | 5 + .../models/gemma/tokenization_gemma_fast.py | 3 + src/transformers/models/git/__init__.py | 44 +---- .../models/git/configuration_git.py | 3 + src/transformers/models/git/modeling_git.py | 3 + src/transformers/models/git/processing_git.py | 3 + src/transformers/models/glpn/__init__.py | 60 +----- .../models/glpn/configuration_glpn.py | 3 + .../models/glpn/feature_extraction_glpn.py | 5 + .../models/glpn/image_processing_glpn.py | 5 + src/transformers/models/glpn/modeling_glpn.py | 3 + src/transformers/models/gpt2/__init__.py | 143 ++------------ .../models/gpt2/configuration_gpt2.py | 6 +- .../models/gpt2/modeling_flax_gpt2.py | 2 + src/transformers/models/gpt2/modeling_gpt2.py | 12 ++ .../models/gpt2/modeling_tf_gpt2.py | 10 + .../models/gpt2/tokenization_gpt2.py | 3 + .../models/gpt2/tokenization_gpt2_fast.py | 3 + .../models/gpt2/tokenization_gpt2_tf.py | 3 + .../models/gpt_bigcode/__init__.py | 48 +---- .../gpt_bigcode/configuration_gpt_bigcode.py | 3 + .../gpt_bigcode/modeling_gpt_bigcode.py | 9 + src/transformers/models/gpt_neo/__init__.py | 69 +------ .../models/gpt_neo/configuration_gpt_neo.py | 6 +- .../models/gpt_neo/modeling_flax_gpt_neo.py | 2 + .../models/gpt_neo/modeling_gpt_neo.py | 11 ++ src/transformers/models/gpt_neox/__init__.py | 64 +----- 
.../models/gpt_neox/configuration_gpt_neox.py | 3 + .../models/gpt_neox/modeling_gpt_neox.py | 10 + .../gpt_neox/tokenization_gpt_neox_fast.py | 3 + .../models/gpt_neox_japanese/__init__.py | 46 +---- .../configuration_gpt_neox_japanese.py | 3 + .../modeling_gpt_neox_japanese.py | 3 + .../tokenization_gpt_neox_japanese.py | 3 + src/transformers/models/gpt_sw3/__init__.py | 27 +-- .../models/gpt_sw3/tokenization_gpt_sw3.py | 5 + src/transformers/models/gptj/__init__.py | 97 +--------- .../models/gptj/configuration_gptj.py | 6 +- .../models/gptj/modeling_flax_gptj.py | 2 + src/transformers/models/gptj/modeling_gptj.py | 9 + .../models/gptj/modeling_tf_gptj.py | 9 + .../models/grounding_dino/__init__.py | 62 +----- .../configuration_grounding_dino.py | 3 + .../image_processing_grounding_dino.py | 5 + .../grounding_dino/modeling_grounding_dino.py | 3 + .../processing_grounding_dino.py | 3 + src/transformers/models/groupvit/__init__.py | 77 +------- .../models/groupvit/configuration_groupvit.py | 3 + .../models/groupvit/modeling_groupvit.py | 3 + .../models/groupvit/modeling_tf_groupvit.py | 3 + src/transformers/models/herbert/__init__.py | 30 +-- .../models/herbert/tokenization_herbert.py | 3 + .../herbert/tokenization_herbert_fast.py | 3 + src/transformers/models/hubert/__init__.py | 65 +------ .../models/hubert/configuration_hubert.py | 3 + .../models/hubert/modeling_hubert.py | 3 + .../models/hubert/modeling_tf_hubert.py | 3 + src/transformers/models/ibert/__init__.py | 45 +---- .../models/ibert/configuration_ibert.py | 3 + .../models/ibert/modeling_ibert.py | 11 ++ src/transformers/models/idefics/__init__.py | 87 +-------- .../models/idefics/configuration_idefics.py | 3 + .../idefics/image_processing_idefics.py | 5 + .../models/idefics/modeling_idefics.py | 6 +- .../models/idefics/modeling_tf_idefics.py | 3 + .../models/idefics/processing_idefics.py | 3 + src/transformers/models/idefics2/__init__.py | 59 +----- .../models/idefics2/configuration_idefics2.py | 3 + .../idefics2/image_processing_idefics2.py | 5 + .../models/idefics2/modeling_idefics2.py | 5 +- .../models/idefics2/processing_idefics2.py | 3 + src/transformers/models/imagegpt/__init__.py | 62 +----- .../models/imagegpt/configuration_imagegpt.py | 5 +- .../imagegpt/feature_extraction_imagegpt.py | 5 + .../imagegpt/image_processing_imagegpt.py | 5 + .../models/imagegpt/modeling_imagegpt.py | 9 + src/transformers/models/informer/__init__.py | 40 +--- .../models/informer/configuration_informer.py | 3 + .../models/informer/modeling_informer.py | 3 + .../models/instructblip/__init__.py | 51 +---- .../configuration_instructblip.py | 3 + .../instructblip/modeling_instructblip.py | 8 + .../instructblip/processing_instructblip.py | 3 + src/transformers/models/jamba/__init__.py | 43 +--- .../models/jamba/configuration_jamba.py | 3 + .../models/jamba/modeling_jamba.py | 6 + src/transformers/models/kosmos2/__init__.py | 47 +---- .../models/kosmos2/configuration_kosmos2.py | 3 + .../models/kosmos2/modeling_kosmos2.py | 3 + .../models/kosmos2/processing_kosmos2.py | 3 + src/transformers/models/layoutlm/__init__.py | 104 +--------- .../models/layoutlm/configuration_layoutlm.py | 6 +- .../models/layoutlm/modeling_layoutlm.py | 10 + .../models/layoutlm/modeling_tf_layoutlm.py | 11 ++ .../models/layoutlm/tokenization_layoutlm.py | 3 + .../layoutlm/tokenization_layoutlm_fast.py | 3 + .../models/layoutlmv2/__init__.py | 92 ++------- .../layoutlmv2/configuration_layoutlmv2.py | 3 + .../feature_extraction_layoutlmv2.py | 5 + 
.../layoutlmv2/image_processing_layoutlmv2.py | 5 + .../models/layoutlmv2/modeling_layoutlmv2.py | 9 + .../layoutlmv2/processing_layoutlmv2.py | 3 + .../layoutlmv2/tokenization_layoutlmv2.py | 3 + .../tokenization_layoutlmv2_fast.py | 3 + .../models/layoutlmv3/__init__.py | 129 ++---------- .../layoutlmv3/configuration_layoutlmv3.py | 3 + .../feature_extraction_layoutlmv3.py | 5 + .../layoutlmv3/image_processing_layoutlmv3.py | 5 + .../models/layoutlmv3/modeling_layoutlmv3.py | 9 + .../layoutlmv3/modeling_tf_layoutlmv3.py | 10 + .../layoutlmv3/processing_layoutlmv3.py | 3 + .../layoutlmv3/tokenization_layoutlmv3.py | 3 + .../tokenization_layoutlmv3_fast.py | 3 + src/transformers/models/layoutxlm/__init__.py | 53 +---- .../models/layoutxlm/processing_layoutxlm.py | 3 + .../layoutxlm/tokenization_layoutxlm.py | 5 + .../layoutxlm/tokenization_layoutxlm_fast.py | 3 + src/transformers/models/led/__init__.py | 87 +-------- .../models/led/configuration_led.py | 3 + src/transformers/models/led/modeling_led.py | 9 + .../models/led/modeling_tf_led.py | 3 + .../models/led/tokenization_led.py | 3 + .../models/led/tokenization_led_fast.py | 3 + src/transformers/models/levit/__init__.py | 58 +----- .../models/levit/configuration_levit.py | 3 + .../models/levit/feature_extraction_levit.py | 5 + .../models/levit/image_processing_levit.py | 5 + .../models/levit/modeling_levit.py | 8 + src/transformers/models/lilt/__init__.py | 43 +--- .../models/lilt/configuration_lilt.py | 3 + src/transformers/models/lilt/modeling_lilt.py | 9 + src/transformers/models/llama/__init__.py | 104 +--------- .../models/llama/configuration_llama.py | 3 + .../models/llama/modeling_flax_llama.py | 2 + .../models/llama/modeling_llama.py | 10 + .../models/llama/tokenization_llama.py | 2 + .../models/llama/tokenization_llama_fast.py | 3 + src/transformers/models/llava/__init__.py | 41 +--- .../models/llava/configuration_llava.py | 3 + .../models/llava/modeling_llava.py | 5 +- .../models/llava/processing_llava.py | 3 + .../models/llava_next/__init__.py | 59 +----- .../llava_next/configuration_llava_next.py | 3 + .../llava_next/image_processing_llava_next.py | 5 + .../models/llava_next/modeling_llava_next.py | 5 +- .../llava_next/processing_llava_next.py | 3 + .../models/longformer/__init__.py | 117 +---------- .../longformer/configuration_longformer.py | 3 + .../models/longformer/modeling_longformer.py | 11 ++ .../longformer/modeling_tf_longformer.py | 12 ++ .../longformer/tokenization_longformer.py | 3 + .../tokenization_longformer_fast.py | 3 + src/transformers/models/longt5/__init__.py | 68 +------ .../models/longt5/configuration_longt5.py | 3 + .../models/longt5/modeling_flax_longt5.py | 2 + .../models/longt5/modeling_longt5.py | 3 + src/transformers/models/luke/__init__.py | 57 +----- .../models/luke/configuration_luke.py | 3 + src/transformers/models/luke/modeling_luke.py | 14 ++ .../models/luke/tokenization_luke.py | 3 + src/transformers/models/lxmert/__init__.py | 103 +--------- .../models/lxmert/configuration_lxmert.py | 3 + .../models/lxmert/modeling_lxmert.py | 10 + .../models/lxmert/modeling_tf_lxmert.py | 9 + .../models/lxmert/tokenization_lxmert.py | 3 + .../models/lxmert/tokenization_lxmert_fast.py | 3 + src/transformers/models/m2m_100/__init__.py | 44 +---- .../models/m2m_100/configuration_m2m_100.py | 5 +- .../models/m2m_100/modeling_m2m_100.py | 3 + .../models/m2m_100/tokenization_m2m_100.py | 5 + src/transformers/models/mamba/__init__.py | 43 +--- .../models/mamba/configuration_mamba.py | 3 + 
.../models/mamba/modeling_mamba.py | 3 + src/transformers/models/marian/__init__.py | 99 +--------- .../models/marian/configuration_marian.py | 5 +- .../models/marian/modeling_flax_marian.py | 2 + .../models/marian/modeling_marian.py | 3 + .../models/marian/modeling_tf_marian.py | 3 + .../models/marian/tokenization_marian.py | 5 + src/transformers/models/markuplm/__init__.py | 70 +------ .../models/markuplm/configuration_markuplm.py | 3 + .../markuplm/feature_extraction_markuplm.py | 3 + .../models/markuplm/modeling_markuplm.py | 9 + .../models/markuplm/processing_markuplm.py | 3 + .../models/markuplm/tokenization_markuplm.py | 3 + .../markuplm/tokenization_markuplm_fast.py | 3 + .../models/mask2former/__init__.py | 56 +----- .../mask2former/configuration_mask2former.py | 3 + .../image_processing_mask2former.py | 5 + .../mask2former/modeling_mask2former.py | 3 + .../models/maskformer/__init__.py | 73 +------ .../maskformer/configuration_maskformer.py | 3 + .../configuration_maskformer_swin.py | 3 + .../feature_extraction_maskformer.py | 5 + .../maskformer/image_processing_maskformer.py | 5 + .../models/maskformer/modeling_maskformer.py | 3 + .../maskformer/modeling_maskformer_swin.py | 3 + src/transformers/models/mbart/__init__.py | 135 +------------ .../models/mbart/configuration_mbart.py | 5 +- .../models/mbart/modeling_flax_mbart.py | 8 + .../models/mbart/modeling_mbart.py | 10 + .../models/mbart/modeling_tf_mbart.py | 3 + .../models/mbart/tokenization_mbart.py | 5 + .../models/mbart/tokenization_mbart_fast.py | 3 + src/transformers/models/mbart50/__init__.py | 43 +--- .../models/mbart50/tokenization_mbart50.py | 5 + .../mbart50/tokenization_mbart50_fast.py | 3 + .../models/megatron_bert/__init__.py | 52 +---- .../configuration_megatron_bert.py | 3 + .../megatron_bert/modeling_megatron_bert.py | 14 ++ src/transformers/models/mgp_str/__init__.py | 57 +----- .../models/mgp_str/configuration_mgp_str.py | 3 + .../models/mgp_str/modeling_mgp_str.py | 3 + .../models/mgp_str/processing_mgp_str.py | 10 +- .../models/mgp_str/tokenization_mgp_str.py | 3 + src/transformers/models/mistral/__init__.py | 103 +--------- .../models/mistral/configuration_mistral.py | 3 + .../models/mistral/modeling_flax_mistral.py | 3 +- .../models/mistral/modeling_mistral.py | 9 + .../models/mistral/modeling_tf_mistral.py | 9 + src/transformers/models/mixtral/__init__.py | 49 +---- .../models/mixtral/configuration_mixtral.py | 3 + .../models/mixtral/modeling_mixtral.py | 11 ++ src/transformers/models/mluke/__init__.py | 28 +-- .../models/mluke/tokenization_mluke.py | 5 + .../models/mobilebert/__init__.py | 127 +----------- .../mobilebert/configuration_mobilebert.py | 3 + .../models/mobilebert/modeling_mobilebert.py | 14 ++ .../mobilebert/modeling_tf_mobilebert.py | 14 ++ .../mobilebert/tokenization_mobilebert.py | 3 + .../tokenization_mobilebert_fast.py | 3 + .../models/mobilenet_v1/__init__.py | 68 +------ .../configuration_mobilenet_v1.py | 3 + .../feature_extraction_mobilenet_v1.py | 5 + .../image_processing_mobilenet_v1.py | 5 + .../mobilenet_v1/modeling_mobilenet_v1.py | 8 + .../models/mobilenet_v2/__init__.py | 71 +------ .../configuration_mobilenet_v2.py | 3 + .../feature_extraction_mobilenet_v2.py | 5 + .../image_processing_mobilenet_v2.py | 5 + .../mobilenet_v2/modeling_mobilenet_v2.py | 9 + src/transformers/models/mobilevit/__init__.py | 94 +-------- .../mobilevit/configuration_mobilevit.py | 3 + .../mobilevit/feature_extraction_mobilevit.py | 5 + .../mobilevit/image_processing_mobilevit.py | 5 + 
.../models/mobilevit/modeling_mobilevit.py | 8 + .../models/mobilevit/modeling_tf_mobilevit.py | 9 + .../models/mobilevitv2/__init__.py | 52 +---- .../mobilevitv2/configuration_mobilevitv2.py | 3 + .../mobilevitv2/modeling_mobilevitv2.py | 8 + src/transformers/models/mpnet/__init__.py | 114 +---------- .../models/mpnet/configuration_mpnet.py | 3 + .../models/mpnet/modeling_mpnet.py | 11 ++ .../models/mpnet/modeling_tf_mpnet.py | 12 ++ .../models/mpnet/tokenization_mpnet.py | 3 + .../models/mpnet/tokenization_mpnet_fast.py | 3 + src/transformers/models/mpt/__init__.py | 45 +---- .../models/mpt/configuration_mpt.py | 3 + src/transformers/models/mpt/modeling_mpt.py | 10 + src/transformers/models/mra/__init__.py | 61 +----- .../models/mra/configuration_mra.py | 3 + src/transformers/models/mra/modeling_mra.py | 11 ++ src/transformers/models/mt5/__init__.py | 112 +---------- .../models/mt5/configuration_mt5.py | 3 + .../models/mt5/modeling_flax_mt5.py | 3 + src/transformers/models/mt5/modeling_mt5.py | 12 ++ .../models/mt5/modeling_tf_mt5.py | 3 + .../models/mt5/tokenization_mt5.py | 26 +++ .../models/mt5/tokenization_mt5_fast.py | 24 +++ src/transformers/models/musicgen/__init__.py | 49 +---- .../models/musicgen/configuration_musicgen.py | 3 + .../models/musicgen/modeling_musicgen.py | 9 + .../models/musicgen/processing_musicgen.py | 3 + .../models/musicgen_melody/__init__.py | 73 +------ .../configuration_musicgen_melody.py | 3 + .../feature_extraction_musicgen_melody.py | 5 + .../modeling_musicgen_melody.py | 9 + .../processing_musicgen_melody.py | 5 + src/transformers/models/mvp/__init__.py | 64 +----- .../models/mvp/configuration_mvp.py | 3 + src/transformers/models/mvp/modeling_mvp.py | 10 + .../models/mvp/tokenization_mvp.py | 3 + .../models/mvp/tokenization_mvp_fast.py | 3 + src/transformers/models/nllb/__init__.py | 49 +---- .../models/nllb/tokenization_nllb.py | 5 + .../models/nllb/tokenization_nllb_fast.py | 3 + src/transformers/models/nllb_moe/__init__.py | 45 +---- .../models/nllb_moe/configuration_nllb_moe.py | 3 + .../models/nllb_moe/modeling_nllb_moe.py | 9 + src/transformers/models/nougat/__init__.py | 49 +---- .../models/nougat/image_processing_nougat.py | 6 +- .../models/nougat/processing_nougat.py | 3 + .../models/nougat/tokenization_nougat_fast.py | 3 + .../models/nystromformer/__init__.py | 50 +---- .../configuration_nystromformer.py | 3 + .../nystromformer/modeling_nystromformer.py | 11 ++ src/transformers/models/olmo/__init__.py | 44 +---- .../models/olmo/configuration_olmo.py | 3 + src/transformers/models/olmo/modeling_olmo.py | 3 + src/transformers/models/oneformer/__init__.py | 58 +----- .../oneformer/configuration_oneformer.py | 3 + .../oneformer/image_processing_oneformer.py | 5 + .../models/oneformer/modeling_oneformer.py | 3 + .../models/oneformer/processing_oneformer.py | 3 + src/transformers/models/openai/__init__.py | 103 +--------- .../models/openai/configuration_openai.py | 3 + .../models/openai/modeling_openai.py | 10 + .../models/openai/modeling_tf_openai.py | 10 + .../models/openai/tokenization_openai.py | 3 + .../models/openai/tokenization_openai_fast.py | 3 + src/transformers/models/opt/__init__.py | 86 +------- .../models/opt/configuration_opt.py | 3 + .../models/opt/modeling_flax_opt.py | 2 + src/transformers/models/opt/modeling_opt.py | 9 + .../models/opt/modeling_tf_opt.py | 3 + src/transformers/models/owlv2/__init__.py | 76 +------- .../models/owlv2/configuration_owlv2.py | 3 + .../models/owlv2/image_processing_owlv2.py | 5 + 
.../models/owlv2/modeling_owlv2.py | 3 + .../models/owlv2/processing_owlv2.py | 3 + src/transformers/models/owlvit/__init__.py | 84 +------- .../models/owlvit/configuration_owlvit.py | 3 + .../owlvit/feature_extraction_owlvit.py | 5 + .../models/owlvit/image_processing_owlvit.py | 5 + .../models/owlvit/modeling_owlvit.py | 3 + .../models/owlvit/processing_owlvit.py | 3 + .../models/patchtsmixer/__init__.py | 48 +---- .../configuration_patchtsmixer.py | 3 + .../patchtsmixer/modeling_patchtsmixer.py | 10 + src/transformers/models/patchtst/__init__.py | 46 +---- .../models/patchtst/configuration_patchtst.py | 3 + .../models/patchtst/modeling_patchtst.py | 10 + src/transformers/models/pegasus/__init__.py | 127 +----------- .../models/pegasus/configuration_pegasus.py | 3 + .../models/pegasus/modeling_flax_pegasus.py | 2 + .../models/pegasus/modeling_pegasus.py | 3 + .../models/pegasus/modeling_tf_pegasus.py | 3 + .../models/pegasus/tokenization_pegasus.py | 5 + .../pegasus/tokenization_pegasus_fast.py | 3 + src/transformers/models/pegasus_x/__init__.py | 40 +--- .../pegasus_x/configuration_pegasus_x.py | 3 + .../models/pegasus_x/modeling_pegasus_x.py | 3 + src/transformers/models/perceiver/__init__.py | 82 +------- .../perceiver/configuration_perceiver.py | 4 +- .../perceiver/feature_extraction_perceiver.py | 5 + .../perceiver/image_processing_perceiver.py | 5 + .../models/perceiver/modeling_perceiver.py | 13 ++ .../perceiver/tokenization_perceiver.py | 3 + src/transformers/models/persimmon/__init__.py | 49 +---- .../persimmon/configuration_persimmon.py | 3 + .../models/persimmon/modeling_persimmon.py | 9 + src/transformers/models/phi/__init__.py | 52 +---- .../models/phi/configuration_phi.py | 3 + src/transformers/models/phi/modeling_phi.py | 9 + src/transformers/models/phi3/__init__.py | 52 +---- .../models/phi3/configuration_phi3.py | 3 + src/transformers/models/phi3/modeling_phi3.py | 9 + src/transformers/models/phobert/__init__.py | 11 +- .../models/phobert/tokenization_phobert.py | 3 + .../models/pix2struct/__init__.py | 69 +------ .../pix2struct/configuration_pix2struct.py | 3 + .../pix2struct/image_processing_pix2struct.py | 6 +- .../models/pix2struct/modeling_pix2struct.py | 8 + .../pix2struct/processing_pix2struct.py | 3 + src/transformers/models/plbart/__init__.py | 65 +------ .../models/plbart/configuration_plbart.py | 3 + .../models/plbart/modeling_plbart.py | 9 + .../models/plbart/tokenization_plbart.py | 5 + .../models/poolformer/__init__.py | 66 +------ .../poolformer/configuration_poolformer.py | 3 + .../feature_extraction_poolformer.py | 5 + .../poolformer/image_processing_poolformer.py | 5 + .../models/poolformer/modeling_poolformer.py | 3 + src/transformers/models/pop2piano/__init__.py | 108 +---------- .../pop2piano/configuration_pop2piano.py | 3 + .../pop2piano/feature_extraction_pop2piano.py | 13 ++ .../models/pop2piano/modeling_pop2piano.py | 3 + .../models/pop2piano/processing_pop2piano.py | 17 +- .../pop2piano/tokenization_pop2piano.py | 21 +- .../models/prophetnet/__init__.py | 49 +---- .../prophetnet/configuration_prophetnet.py | 3 + .../models/prophetnet/modeling_prophetnet.py | 10 + .../prophetnet/tokenization_prophetnet.py | 3 + src/transformers/models/pvt/__init__.py | 61 +----- .../models/pvt/configuration_pvt.py | 3 + .../models/pvt/image_processing_pvt.py | 5 + src/transformers/models/pvt/modeling_pvt.py | 3 + src/transformers/models/pvt_v2/__init__.py | 46 +---- .../models/pvt_v2/configuration_pvt_v2.py | 3 + .../models/pvt_v2/modeling_pvt_v2.py | 3 + 
src/transformers/models/qwen2/__init__.py | 69 +------ .../models/qwen2/configuration_qwen2.py | 3 + .../models/qwen2/modeling_qwen2.py | 10 + .../models/qwen2/tokenization_qwen2.py | 3 + .../models/qwen2/tokenization_qwen2_fast.py | 3 + src/transformers/models/qwen2_moe/__init__.py | 49 +---- .../qwen2_moe/configuration_qwen2_moe.py | 3 + .../models/qwen2_moe/modeling_qwen2_moe.py | 9 + src/transformers/models/rag/__init__.py | 70 +------ .../models/rag/configuration_rag.py | 3 + src/transformers/models/rag/modeling_rag.py | 3 + .../models/rag/modeling_tf_rag.py | 3 + src/transformers/models/rag/retrieval_rag.py | 3 + .../models/rag/tokenization_rag.py | 3 + .../models/recurrent_gemma/__init__.py | 44 +---- .../configuration_recurrent_gemma.py | 3 + .../modeling_recurrent_gemma.py | 3 + src/transformers/models/reformer/__init__.py | 88 +-------- .../models/reformer/configuration_reformer.py | 3 + .../models/reformer/modeling_reformer.py | 10 + .../models/reformer/tokenization_reformer.py | 5 + .../reformer/tokenization_reformer_fast.py | 3 + src/transformers/models/regnet/__init__.py | 94 +-------- .../models/regnet/configuration_regnet.py | 3 + .../models/regnet/modeling_flax_regnet.py | 2 + .../models/regnet/modeling_regnet.py | 3 + .../models/regnet/modeling_tf_regnet.py | 3 + src/transformers/models/rembert/__init__.py | 132 +------------ .../models/rembert/configuration_rembert.py | 3 + .../models/rembert/modeling_rembert.py | 13 ++ .../models/rembert/modeling_tf_rembert.py | 13 ++ .../models/rembert/tokenization_rembert.py | 5 + .../rembert/tokenization_rembert_fast.py | 3 + src/transformers/models/resnet/__init__.py | 91 +-------- .../models/resnet/configuration_resnet.py | 3 + .../models/resnet/modeling_flax_resnet.py | 2 + .../models/resnet/modeling_resnet.py | 3 + .../models/resnet/modeling_tf_resnet.py | 3 + src/transformers/models/roberta/__init__.py | 149 +------------- .../models/roberta/configuration_roberta.py | 3 + .../models/roberta/modeling_flax_roberta.py | 11 ++ .../models/roberta/modeling_roberta.py | 12 ++ .../models/roberta/modeling_tf_roberta.py | 13 ++ .../models/roberta/tokenization_roberta.py | 3 + .../roberta/tokenization_roberta_fast.py | 3 + .../models/roberta_prelayernorm/__init__.py | 134 +------------ .../configuration_roberta_prelayernorm.py | 3 + .../modeling_flax_roberta_prelayernorm.py | 11 ++ .../modeling_roberta_prelayernorm.py | 12 ++ .../modeling_tf_roberta_prelayernorm.py | 13 ++ src/transformers/models/roc_bert/__init__.py | 74 +------ .../models/roc_bert/configuration_roc_bert.py | 3 + .../models/roc_bert/modeling_roc_bert.py | 14 ++ .../models/roc_bert/tokenization_roc_bert.py | 3 + src/transformers/models/roformer/__init__.py | 153 +-------------- .../models/roformer/configuration_roformer.py | 3 + .../models/roformer/modeling_flax_roformer.py | 10 + .../models/roformer/modeling_roformer.py | 13 ++ .../models/roformer/modeling_tf_roformer.py | 13 ++ .../models/roformer/tokenization_roformer.py | 3 + .../roformer/tokenization_roformer_fast.py | 3 + src/transformers/models/rwkv/__init__.py | 43 +--- .../models/rwkv/configuration_rwkv.py | 3 + src/transformers/models/rwkv/modeling_rwkv.py | 3 + src/transformers/models/sam/__init__.py | 89 +-------- .../models/sam/configuration_sam.py | 3 + .../models/sam/image_processing_sam.py | 5 + src/transformers/models/sam/modeling_sam.py | 3 + .../models/sam/modeling_tf_sam.py | 3 + src/transformers/models/sam/processing_sam.py | 3 + .../models/seamless_m4t/__init__.py | 98 +--------- 
.../configuration_seamless_m4t.py | 3 + .../feature_extraction_seamless_m4t.py | 3 + .../seamless_m4t/modeling_seamless_m4t.py | 14 ++ .../seamless_m4t/processing_seamless_m4t.py | 3 + .../seamless_m4t/tokenization_seamless_m4t.py | 5 + .../tokenization_seamless_m4t_fast.py | 3 + .../models/seamless_m4t_v2/__init__.py | 48 +---- .../configuration_seamless_m4t_v2.py | 3 + .../modeling_seamless_m4t_v2.py | 10 + src/transformers/models/segformer/__init__.py | 97 +--------- .../segformer/configuration_segformer.py | 3 + .../segformer/feature_extraction_segformer.py | 5 + .../segformer/image_processing_segformer.py | 5 + .../models/segformer/modeling_segformer.py | 9 + .../models/segformer/modeling_tf_segformer.py | 10 + src/transformers/models/seggpt/__init__.py | 53 +---- .../models/seggpt/configuration_seggpt.py | 3 + .../models/seggpt/image_processing_seggpt.py | 5 + .../models/seggpt/modeling_seggpt.py | 3 + src/transformers/models/sew/__init__.py | 39 +--- .../models/sew/configuration_sew.py | 3 + src/transformers/models/sew/modeling_sew.py | 3 + src/transformers/models/sew_d/__init__.py | 39 +--- .../models/sew_d/configuration_sew_d.py | 3 + .../models/sew_d/modeling_sew_d.py | 3 + src/transformers/models/siglip/__init__.py | 96 +-------- .../models/siglip/configuration_siglip.py | 3 + .../models/siglip/image_processing_siglip.py | 5 + .../models/siglip/modeling_siglip.py | 9 + .../models/siglip/processing_siglip.py | 3 + .../models/siglip/tokenization_siglip.py | 5 + .../models/speech_encoder_decoder/__init__.py | 46 +---- .../configuration_speech_encoder_decoder.py | 3 + .../modeling_flax_speech_encoder_decoder.py | 3 + .../modeling_speech_encoder_decoder.py | 3 + .../models/speech_to_text/__init__.py | 93 +-------- .../configuration_speech_to_text.py | 3 + .../feature_extraction_speech_to_text.py | 3 + .../speech_to_text/modeling_speech_to_text.py | 3 + .../modeling_tf_speech_to_text.py | 8 + .../processing_speech_to_text.py | 3 + .../tokenization_speech_to_text.py | 5 + src/transformers/models/speecht5/__init__.py | 78 +------- .../models/speecht5/configuration_speecht5.py | 3 + .../speecht5/feature_extraction_speecht5.py | 3 + .../models/speecht5/modeling_speecht5.py | 10 + .../models/speecht5/processing_speecht5.py | 3 + .../models/speecht5/tokenization_speecht5.py | 5 + src/transformers/models/splinter/__init__.py | 64 +----- .../models/splinter/configuration_splinter.py | 3 + .../models/splinter/modeling_splinter.py | 3 + .../models/splinter/tokenization_splinter.py | 3 + .../splinter/tokenization_splinter_fast.py | 3 + .../models/squeezebert/__init__.py | 76 +------- .../squeezebert/configuration_squeezebert.py | 3 + .../squeezebert/modeling_squeezebert.py | 11 ++ .../squeezebert/tokenization_squeezebert.py | 3 + .../tokenization_squeezebert_fast.py | 3 + src/transformers/models/stablelm/__init__.py | 49 +---- .../models/stablelm/configuration_stablelm.py | 3 + .../models/stablelm/modeling_stablelm.py | 9 + .../models/starcoder2/__init__.py | 49 +---- .../starcoder2/configuration_starcoder2.py | 3 + .../models/starcoder2/modeling_starcoder2.py | 10 + .../models/superpoint/__init__.py | 55 +----- .../superpoint/configuration_superpoint.py | 3 + .../superpoint/image_processing_superpoint.py | 6 +- .../models/superpoint/modeling_superpoint.py | 3 + .../models/swiftformer/__init__.py | 73 +------ .../swiftformer/configuration_swiftformer.py | 3 + .../swiftformer/modeling_swiftformer.py | 3 + .../swiftformer/modeling_tf_swiftformer.py | 8 + src/transformers/models/swin/__init__.py | 
68 +------ .../models/swin/configuration_swin.py | 3 + src/transformers/models/swin/modeling_swin.py | 9 + .../models/swin/modeling_tf_swin.py | 9 + src/transformers/models/swin2sr/__init__.py | 59 +----- .../models/swin2sr/configuration_swin2sr.py | 3 + .../swin2sr/image_processing_swin2sr.py | 5 + .../models/swin2sr/modeling_swin2sr.py | 3 + src/transformers/models/swinv2/__init__.py | 45 +---- .../models/swinv2/configuration_swinv2.py | 3 + .../models/swinv2/modeling_swinv2.py | 9 + .../models/switch_transformers/__init__.py | 61 +----- .../configuration_switch_transformers.py | 3 + .../modeling_switch_transformers.py | 10 + src/transformers/models/t5/__init__.py | 145 +------------- .../models/t5/configuration_t5.py | 3 + .../models/t5/modeling_flax_t5.py | 2 + src/transformers/models/t5/modeling_t5.py | 14 ++ src/transformers/models/t5/modeling_tf_t5.py | 5 + src/transformers/models/t5/tokenization_t5.py | 5 + .../models/t5/tokenization_t5_fast.py | 3 + .../models/table_transformer/__init__.py | 46 +---- .../configuration_table_transformer.py | 3 + .../modeling_table_transformer.py | 3 + src/transformers/models/tapas/__init__.py | 78 +------- .../models/tapas/configuration_tapas.py | 3 + .../models/tapas/modeling_tapas.py | 10 + .../models/tapas/modeling_tf_tapas.py | 10 + .../models/tapas/tokenization_tapas.py | 3 + .../time_series_transformer/__init__.py | 41 +--- .../configuration_time_series_transformer.py | 3 + .../modeling_time_series_transformer.py | 3 + .../models/timesformer/__init__.py | 38 +--- .../timesformer/configuration_timesformer.py | 3 + .../timesformer/modeling_timesformer.py | 3 + .../models/timm_backbone/__init__.py | 44 +---- .../configuration_timm_backbone.py | 3 + .../timm_backbone/modeling_timm_backbone.py | 3 + src/transformers/models/trocr/__init__.py | 44 +---- .../models/trocr/configuration_trocr.py | 3 + .../models/trocr/modeling_trocr.py | 3 + .../models/trocr/processing_trocr.py | 3 + src/transformers/models/tvp/__init__.py | 60 +----- .../models/tvp/configuration_tvp.py | 3 + .../models/tvp/image_processing_tvp.py | 5 + src/transformers/models/tvp/modeling_tvp.py | 3 + src/transformers/models/tvp/processing_tvp.py | 3 + src/transformers/models/udop/__init__.py | 84 +------- .../models/udop/configuration_udop.py | 3 + src/transformers/models/udop/modeling_udop.py | 3 + .../models/udop/processing_udop.py | 3 + .../models/udop/tokenization_udop.py | 5 + .../models/udop/tokenization_udop_fast.py | 3 + src/transformers/models/umt5/__init__.py | 45 +---- .../models/umt5/configuration_umt5.py | 3 + src/transformers/models/umt5/modeling_umt5.py | 11 ++ src/transformers/models/unispeech/__init__.py | 46 +---- .../unispeech/configuration_unispeech.py | 3 + .../models/unispeech/modeling_unispeech.py | 9 + .../models/unispeech_sat/__init__.py | 52 +---- .../configuration_unispeech_sat.py | 3 + .../unispeech_sat/modeling_unispeech_sat.py | 11 ++ src/transformers/models/univnet/__init__.py | 45 +---- .../models/univnet/configuration_univnet.py | 3 + .../univnet/feature_extraction_univnet.py | 3 + .../models/univnet/modeling_univnet.py | 3 + src/transformers/models/upernet/__init__.py | 35 +--- .../models/upernet/configuration_upernet.py | 3 + .../models/upernet/modeling_upernet.py | 3 + .../models/video_llava/__init__.py | 58 +----- .../video_llava/configuration_video_llava.py | 3 + .../image_processing_video_llava.py | 5 + .../video_llava/modeling_video_llava.py | 3 + .../video_llava/processing_video_llava.py | 3 + src/transformers/models/videomae/__init__.py | 60 
+----- .../models/videomae/configuration_videomae.py | 3 + .../videomae/feature_extraction_videomae.py | 5 + .../videomae/image_processing_videomae.py | 5 + .../models/videomae/modeling_videomae.py | 3 + src/transformers/models/vilt/__init__.py | 71 +------ .../models/vilt/configuration_vilt.py | 3 + .../models/vilt/feature_extraction_vilt.py | 5 + .../models/vilt/image_processing_vilt.py | 5 + src/transformers/models/vilt/modeling_vilt.py | 11 ++ .../models/vilt/processing_vilt.py | 5 + src/transformers/models/vipllava/__init__.py | 37 +--- .../models/vipllava/configuration_vipllava.py | 3 + .../models/vipllava/modeling_vipllava.py | 5 +- .../models/vision_encoder_decoder/__init__.py | 71 +------ .../configuration_vision_encoder_decoder.py | 5 +- .../modeling_flax_vision_encoder_decoder.py | 3 + .../modeling_tf_vision_encoder_decoder.py | 3 + .../modeling_vision_encoder_decoder.py | 3 + .../vision_text_dual_encoder/__init__.py | 77 +------- .../configuration_vision_text_dual_encoder.py | 3 + .../modeling_flax_vision_text_dual_encoder.py | 2 + .../modeling_tf_vision_text_dual_encoder.py | 3 + .../modeling_vision_text_dual_encoder.py | 3 + .../processing_vision_text_dual_encoder.py | 3 + .../models/visual_bert/__init__.py | 48 +---- .../visual_bert/configuration_visual_bert.py | 3 + .../visual_bert/modeling_visual_bert.py | 11 ++ src/transformers/models/vit/__init__.py | 126 +----------- .../models/vit/configuration_vit.py | 3 + .../models/vit/feature_extraction_vit.py | 5 + .../models/vit/image_processing_vit.py | 5 + .../models/vit/modeling_flax_vit.py | 2 + .../models/vit/modeling_tf_vit.py | 3 + src/transformers/models/vit/modeling_vit.py | 3 + src/transformers/models/vit_mae/__init__.py | 66 +------ .../models/vit_mae/configuration_vit_mae.py | 3 + .../models/vit_mae/modeling_tf_vit_mae.py | 3 + .../models/vit_mae/modeling_vit_mae.py | 3 + src/transformers/models/vit_msn/__init__.py | 36 +--- .../models/vit_msn/configuration_vit_msn.py | 3 + .../models/vit_msn/modeling_vit_msn.py | 3 + src/transformers/models/vitdet/__init__.py | 40 +--- .../models/vitdet/configuration_vitdet.py | 3 + .../models/vitdet/modeling_vitdet.py | 3 + src/transformers/models/vitmatte/__init__.py | 56 +----- .../models/vitmatte/configuration_vitmatte.py | 3 + .../vitmatte/image_processing_vitmatte.py | 5 + .../models/vitmatte/modeling_vitmatte.py | 3 + src/transformers/models/vits/__init__.py | 47 +---- .../models/vits/configuration_vits.py | 3 + src/transformers/models/vits/modeling_vits.py | 3 + .../models/vits/tokenization_vits.py | 3 + src/transformers/models/vivit/__init__.py | 72 +------ .../models/vivit/configuration_vivit.py | 3 + .../models/vivit/image_processing_vivit.py | 5 + .../models/vivit/modeling_vivit.py | 3 + src/transformers/models/wav2vec2/__init__.py | 120 ++---------- .../models/wav2vec2/configuration_wav2vec2.py | 3 + .../wav2vec2/feature_extraction_wav2vec2.py | 3 + .../models/wav2vec2/modeling_flax_wav2vec2.py | 2 + .../models/wav2vec2/modeling_tf_wav2vec2.py | 9 + .../models/wav2vec2/modeling_wav2vec2.py | 12 ++ .../models/wav2vec2/processing_wav2vec2.py | 3 + .../models/wav2vec2/tokenization_wav2vec2.py | 3 + .../models/wav2vec2_bert/__init__.py | 50 +---- .../configuration_wav2vec2_bert.py | 3 + .../wav2vec2_bert/modeling_wav2vec2_bert.py | 10 + .../wav2vec2_bert/processing_wav2vec2_bert.py | 3 + .../models/wav2vec2_conformer/__init__.py | 49 +---- .../configuration_wav2vec2_conformer.py | 3 + .../modeling_wav2vec2_conformer.py | 11 ++ .../models/wav2vec2_phoneme/__init__.py | 9 +- 
.../tokenization_wav2vec2_phoneme.py | 3 + .../models/wav2vec2_with_lm/__init__.py | 9 +- .../processing_wav2vec2_with_lm.py | 3 + src/transformers/models/wavlm/__init__.py | 42 +--- .../models/wavlm/configuration_wavlm.py | 3 + .../models/wavlm/modeling_wavlm.py | 10 + src/transformers/models/whisper/__init__.py | 128 ++---------- .../models/whisper/configuration_whisper.py | 3 + .../whisper/feature_extraction_whisper.py | 5 +- .../models/whisper/modeling_flax_whisper.py | 7 + .../models/whisper/modeling_tf_whisper.py | 3 + .../models/whisper/modeling_whisper.py | 9 + .../models/whisper/processing_whisper.py | 3 + .../models/whisper/tokenization_whisper.py | 3 + .../whisper/tokenization_whisper_fast.py | 3 + src/transformers/models/x_clip/__init__.py | 51 +---- .../models/x_clip/configuration_x_clip.py | 3 + .../models/x_clip/modeling_x_clip.py | 3 + .../models/x_clip/processing_x_clip.py | 3 + src/transformers/models/xglm/__init__.py | 124 +----------- .../models/xglm/configuration_xglm.py | 3 + .../models/xglm/modeling_flax_xglm.py | 2 + .../models/xglm/modeling_tf_xglm.py | 3 + src/transformers/models/xglm/modeling_xglm.py | 3 + .../models/xglm/tokenization_xglm.py | 5 + .../models/xglm/tokenization_xglm_fast.py | 3 + src/transformers/models/xlm/__init__.py | 88 +-------- .../models/xlm/configuration_xlm.py | 3 + .../models/xlm/modeling_tf_xlm.py | 12 ++ src/transformers/models/xlm/modeling_xlm.py | 12 ++ .../models/xlm/tokenization_xlm.py | 3 + .../models/xlm_roberta/__init__.py | 167 +--------------- .../xlm_roberta/configuration_xlm_roberta.py | 3 + .../xlm_roberta/modeling_flax_xlm_roberta.py | 11 ++ .../xlm_roberta/modeling_tf_xlm_roberta.py | 13 ++ .../xlm_roberta/modeling_xlm_roberta.py | 12 ++ .../xlm_roberta/tokenization_xlm_roberta.py | 5 + .../tokenization_xlm_roberta_fast.py | 3 + .../models/xlm_roberta_xl/__init__.py | 55 +----- .../configuration_xlm_roberta_xl.py | 3 + .../xlm_roberta_xl/modeling_xlm_roberta_xl.py | 12 ++ src/transformers/models/xlnet/__init__.py | 126 +----------- .../models/xlnet/configuration_xlnet.py | 3 + .../models/xlnet/modeling_tf_xlnet.py | 12 ++ .../models/xlnet/modeling_xlnet.py | 13 ++ .../models/xlnet/tokenization_xlnet.py | 5 + .../models/xlnet/tokenization_xlnet_fast.py | 3 + src/transformers/models/xmod/__init__.py | 66 +------ .../models/xmod/configuration_xmod.py | 3 + src/transformers/models/xmod/modeling_xmod.py | 12 ++ src/transformers/models/yolos/__init__.py | 58 +----- .../models/yolos/configuration_yolos.py | 3 + .../models/yolos/feature_extraction_yolos.py | 5 + .../models/yolos/image_processing_yolos.py | 5 + .../models/yolos/modeling_yolos.py | 3 + src/transformers/models/yoso/__init__.py | 48 +---- .../models/yoso/configuration_yoso.py | 3 + src/transformers/models/yoso/modeling_yoso.py | 11 ++ 1144 files changed, 6381 insertions(+), 15589 deletions(-) create mode 100644 src/transformers/models/mt5/tokenization_mt5.py create mode 100644 src/transformers/models/mt5/tokenization_mt5_fast.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 483e315e260c..66069aea73db 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -116,7 +116,7 @@ jobs: command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - - run: python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1) + - run: python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 
Have you added this object to the __all__ object of the module? 🚨'; exit 1) - run: ruff check examples tests src utils - run: ruff format tests src utils --check - run: python utils/custom_init_isort.py --check_only diff --git a/Makefile b/Makefile index d3998327cc71..8a094ac65336 100644 --- a/Makefile +++ b/Makefile @@ -50,7 +50,7 @@ repo-consistency: # this target runs checks on all files quality: - @python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1) + @python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! Have you added this object to the __all__ object of the module? 🚨'; exit 1) ruff check $(check_dirs) setup.py conftest.py ruff format --check $(check_dirs) setup.py conftest.py python utils/sort_auto_mappings.py --check_only diff --git a/src/transformers/models/audio_spectrogram_transformer/__init__.py b/src/transformers/models/audio_spectrogram_transformer/__init__.py index 9f1d65e1aac8..c9d94a73972a 100644 --- a/src/transformers/models/audio_spectrogram_transformer/__init__.py +++ b/src/transformers/models/audio_spectrogram_transformer/__init__.py @@ -13,47 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_audio_spectrogram_transformer": ["ASTConfig"], - "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_audio_spectrogram_transformer"] = [ - "ASTForAudioClassification", - "ASTModel", - "ASTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_audio_spectrogram_transformer import ( - ASTConfig, - ) - from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_audio_spectrogram_transformer import ( - ASTForAudioClassification, - ASTModel, - ASTPreTrainedModel, - ) - - + from .configuration_audio_spectrogram_transformer import * + from .feature_extraction_audio_spectrogram_transformer import * + from .modeling_audio_spectrogram_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py index 7980667a68d7..77bec930236f 100644 --- a/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py @@ -126,3 +126,6 @@ def __init__( # generative parameters deprecation cycle, overwriting this function prevents this from happening. 
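# A hedged sketch of what the new `define_import_structure(_file)` one-liner
# computes at import time, assuming the helper added to utils/import_utils.py
# scans the files next to `__file__` for their `__all__` declarations (plus
# any backends tagged via `export`) and rebuilds roughly the hand-written
# mapping deleted above. The dict shape is an assumption for illustration;
# the module and object names come from this diff:
ASSUMED_AST_IMPORT_STRUCTURE = {
    frozenset(): {  # importable with no optional backend
        "configuration_audio_spectrogram_transformer": {"ASTConfig"},
        "feature_extraction_audio_spectrogram_transformer": {"ASTFeatureExtractor"},
    },
    frozenset({"torch"}): {  # previously guarded by is_torch_available()
        "modeling_audio_spectrogram_transformer": {
            "ASTForAudioClassification",
            "ASTModel",
            "ASTPreTrainedModel",
        },
    },
}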
def _get_non_default_generation_parameters(self) -> Dict[str, Any]: return {} + + +__all__ = ["ASTConfig"] diff --git a/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py index 2bd122b4098c..b181afe19e9e 100644 --- a/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py @@ -234,3 +234,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["ASTFeatureExtractor"] diff --git a/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py b/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py index beb249202b96..7f6364e77e41 100644 --- a/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +++ b/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py @@ -654,3 +654,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ASTPreTrainedModel", "ASTModel", "ASTForAudioClassification"] diff --git a/src/transformers/models/autoformer/__init__.py b/src/transformers/models/autoformer/__init__.py index 1ef70173e30a..48a329608039 100644 --- a/src/transformers/models/autoformer/__init__.py +++ b/src/transformers/models/autoformer/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_autoformer": ["AutoformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_autoformer"] = [ - "AutoformerForPrediction", - "AutoformerModel", - "AutoformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_autoformer import ( - AutoformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_autoformer import ( - AutoformerForPrediction, - AutoformerModel, - AutoformerPreTrainedModel, - ) - + from .configuration_autoformer import * + from .modeling_autoformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/autoformer/configuration_autoformer.py b/src/transformers/models/autoformer/configuration_autoformer.py index f5a4356ce8b4..aba83f19a5d9 100644 --- a/src/transformers/models/autoformer/configuration_autoformer.py +++ b/src/transformers/models/autoformer/configuration_autoformer.py @@ -240,3 +240,6 @@ def _number_of_features(self) -> int: + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features ) + + +__all__ = ["AutoformerConfig"] diff --git 
a/src/transformers/models/autoformer/modeling_autoformer.py b/src/transformers/models/autoformer/modeling_autoformer.py index 5a5b5f24397b..c1259d4129a2 100644 --- a/src/transformers/models/autoformer/modeling_autoformer.py +++ b/src/transformers/models/autoformer/modeling_autoformer.py @@ -2150,3 +2150,6 @@ def generate( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) ) + + +__all__ = ["AutoformerPreTrainedModel", "AutoformerModel", "AutoformerForPrediction"] diff --git a/src/transformers/models/bark/__init__.py b/src/transformers/models/bark/__init__.py index 4cb1a606cf65..df7b405e63bf 100644 --- a/src/transformers/models/bark/__init__.py +++ b/src/transformers/models/bark/__init__.py @@ -13,63 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_bark": [ - "BarkCoarseConfig", - "BarkConfig", - "BarkFineConfig", - "BarkSemanticConfig", - ], - "processing_bark": ["BarkProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bark"] = [ - "BarkFineModel", - "BarkSemanticModel", - "BarkCoarseModel", - "BarkModel", - "BarkPreTrainedModel", - "BarkCausalModel", - ] - if TYPE_CHECKING: - from .configuration_bark import ( - BarkCoarseConfig, - BarkConfig, - BarkFineConfig, - BarkSemanticConfig, - ) - from .processing_bark import BarkProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bark import ( - BarkCausalModel, - BarkCoarseModel, - BarkFineModel, - BarkModel, - BarkPreTrainedModel, - BarkSemanticModel, - ) - + from .configuration_bark import * + from .modeling_bark import * + from .processing_bark import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bark/configuration_bark.py b/src/transformers/models/bark/configuration_bark.py index 6dd08b65e89e..e73208a3c766 100644 --- a/src/transformers/models/bark/configuration_bark.py +++ b/src/transformers/models/bark/configuration_bark.py @@ -323,3 +323,6 @@ def from_sub_model_configs( codec_config=codec_config.to_dict(), **kwargs, ) + + +__all__ = ["BarkSemanticConfig", "BarkCoarseConfig", "BarkFineConfig", "BarkConfig"] diff --git a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index 5aad7b23a8a6..52f60f426692 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -546,6 +546,8 @@ def device(self) -> torch.device: # GPT2-like autoregressive model + + class BarkCausalModel(BarkPreTrainedModel): config_class = BarkSubModelConfig @@ -1811,3 +1813,13 @@ def _check_and_enable_flash_attn_2( config.coarse_acoustics_config._attn_implementation = config._attn_implementation config.fine_acoustics_config._attn_implementation = config._attn_implementation return config + + +__all__ = [ + "BarkPreTrainedModel", + "BarkCausalModel", + "BarkFineModel", + "BarkCoarseModel", + 
"BarkSemanticModel", + "BarkModel", +] diff --git a/src/transformers/models/bark/processing_bark.py b/src/transformers/models/bark/processing_bark.py index 53715f326042..0bed6ca79f41 100644 --- a/src/transformers/models/bark/processing_bark.py +++ b/src/transformers/models/bark/processing_bark.py @@ -285,3 +285,6 @@ def __call__( encoded_text["history_prompt"] = voice_preset return encoded_text + + +__all__ = ["BarkProcessor"] diff --git a/src/transformers/models/bart/__init__.py b/src/transformers/models/bart/__init__.py index d538fbb7d343..cd1e1d8eb778 100644 --- a/src/transformers/models/bart/__init__.py +++ b/src/transformers/models/bart/__init__.py @@ -13,134 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_bart": ["BartConfig", "BartOnnxConfig"], - "tokenization_bart": ["BartTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bart_fast"] = ["BartTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bart"] = [ - "BartForCausalLM", - "BartForConditionalGeneration", - "BartForQuestionAnswering", - "BartForSequenceClassification", - "BartModel", - "BartPreTrainedModel", - "BartPretrainedModel", - "PretrainedBartModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_bart"] = [ - "TFBartForConditionalGeneration", - "TFBartForSequenceClassification", - "TFBartModel", - "TFBartPretrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_bart"] = [ - "FlaxBartDecoderPreTrainedModel", - "FlaxBartForCausalLM", - "FlaxBartForConditionalGeneration", - "FlaxBartForQuestionAnswering", - "FlaxBartForSequenceClassification", - "FlaxBartModel", - "FlaxBartPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_bart import BartConfig, BartOnnxConfig - from .tokenization_bart import BartTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bart_fast import BartTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bart import ( - BartForCausalLM, - BartForConditionalGeneration, - BartForQuestionAnswering, - BartForSequenceClassification, - BartModel, - BartPreTrainedModel, - BartPretrainedModel, - PretrainedBartModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_bart import ( - TFBartForConditionalGeneration, - TFBartForSequenceClassification, - TFBartModel, - TFBartPretrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from 
.modeling_flax_bart import ( - FlaxBartDecoderPreTrainedModel, - FlaxBartForCausalLM, - FlaxBartForConditionalGeneration, - FlaxBartForQuestionAnswering, - FlaxBartForSequenceClassification, - FlaxBartModel, - FlaxBartPreTrainedModel, - ) - + from .configuration_bart import * + from .modeling_bart import * + from .modeling_flax_bart import * + from .modeling_tf_bart import * + from .tokenization_bart import * + from .tokenization_bart_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bart/configuration_bart.py b/src/transformers/models/bart/configuration_bart.py index a3bc7f38653a..fb4513bbd20f 100644 --- a/src/transformers/models/bart/configuration_bart.py +++ b/src/transformers/models/bart/configuration_bart.py @@ -18,10 +18,10 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -400,3 +400,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + +__all__ = ["BartConfig", "BartOnnxConfig"] diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index fa928d05caa8..a004e7db13c7 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -2222,3 +2222,16 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "BartPreTrainedModel", + "PretrainedBartModel", + "BartPretrainedModel", + "BartDecoder", + "BartModel", + "BartForConditionalGeneration", + "BartForSequenceClassification", + "BartForQuestionAnswering", + "BartForCausalLM", +] diff --git a/src/transformers/models/bart/modeling_flax_bart.py b/src/transformers/models/bart/modeling_flax_bart.py index 634c256fe7d8..169d3dc4695f 100644 --- a/src/transformers/models/bart/modeling_flax_bart.py +++ b/src/transformers/models/bart/modeling_flax_bart.py @@ -1993,3 +1993,13 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxBartPreTrainedModel", + "FlaxBartModel", + "FlaxBartForConditionalGeneration", + "FlaxBartForSequenceClassification", + "FlaxBartForQuestionAnswering", + "FlaxBartDecoderPreTrainedModel", + "FlaxBartForCausalLM", +] diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 5ebde8cba60c..70edb1efd96b 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -1709,3 +1709,12 @@ def build(self, input_shape=None): if getattr(self, "classification_head", None) is not None: with tf.name_scope(self.classification_head.name): self.classification_head.build(None) + + +__all__ = [ + "TFBartPretrainedModel", + 
"TFBartModel", + "TFBartForConditionalGeneration", + "TFBartForSequenceClassification", + "TFBartMainLayer", +] diff --git a/src/transformers/models/bart/tokenization_bart.py b/src/transformers/models/bart/tokenization_bart.py index 5207b9c92b07..4c516cb81be0 100644 --- a/src/transformers/models/bart/tokenization_bart.py +++ b/src/transformers/models/bart/tokenization_bart.py @@ -388,3 +388,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["BartTokenizer"] diff --git a/src/transformers/models/bart/tokenization_bart_fast.py b/src/transformers/models/bart/tokenization_bart_fast.py index e9fb8497c907..4586ab4797e5 100644 --- a/src/transformers/models/bart/tokenization_bart_fast.py +++ b/src/transformers/models/bart/tokenization_bart_fast.py @@ -274,3 +274,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["BartTokenizerFast"] diff --git a/src/transformers/models/barthez/__init__.py b/src/transformers/models/barthez/__init__.py index 084cd22bdf1d..40a324de9fb4 100644 --- a/src/transformers/models/barthez/__init__.py +++ b/src/transformers/models/barthez/__init__.py @@ -11,49 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_barthez"] = ["BarthezTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_barthez import BarthezTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_barthez_fast import BarthezTokenizerFast - + from .tokenization_barthez import * + from .tokenization_barthez_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/barthez/tokenization_barthez.py b/src/transformers/models/barthez/tokenization_barthez.py index 46decddb3e10..cb1e4a6d8166 100644 --- a/src/transformers/models/barthez/tokenization_barthez.py +++ b/src/transformers/models/barthez/tokenization_barthez.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = 
logging.get_logger(__name__) @@ -34,6 +35,7 @@ # TODO this class is useless. This is the most standard sentencpiece model. Let's find which one is closest and nuke this. +@export(backends=("sentencepiece",)) class BarthezTokenizer(PreTrainedTokenizer): """ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a BARThez tokenizer. Based on @@ -284,3 +286,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["BarthezTokenizer"] diff --git a/src/transformers/models/barthez/tokenization_barthez_fast.py b/src/transformers/models/barthez/tokenization_barthez_fast.py index df8cc7757e96..a1d95ef03e48 100644 --- a/src/transformers/models/barthez/tokenization_barthez_fast.py +++ b/src/transformers/models/barthez/tokenization_barthez_fast.py @@ -192,3 +192,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["BarthezTokenizerFast"] diff --git a/src/transformers/models/bartpho/__init__.py b/src/transformers/models/bartpho/__init__.py index c20d7370c656..64ebb8b91914 100644 --- a/src/transformers/models/bartpho/__init__.py +++ b/src/transformers/models/bartpho/__init__.py @@ -11,32 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"] if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bartpho import BartphoTokenizer - + from .tokenization_bartpho import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bartpho/tokenization_bartpho.py b/src/transformers/models/bartpho/tokenization_bartpho.py index df121f26e255..9658e32a5810 100644 --- a/src/transformers/models/bartpho/tokenization_bartpho.py +++ b/src/transformers/models/bartpho/tokenization_bartpho.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -31,6 +32,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} +@export(backends=("sentencepiece",)) class BartphoTokenizer(PreTrainedTokenizer): """ Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). 
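# `export` is the new marker used above on BarthezTokenizer and, just below,
# on BartphoTokenizer. A minimal sketch of what such a decorator has to do,
# assuming it only records backend requirements for define_import_structure
# to pick up later; the real helper in utils/import_utils.py may differ:
def export(backends=()):
    def decorator(obj):
        # Hypothetical attribute name; the real marker may store this elsewhere.
        obj.__backends__ = backends
        return obj

    return decorator


# Usage, as seen throughout this patch:
#
#     @export(backends=("sentencepiece",))
#     class BarthezTokenizer(PreTrainedTokenizer):
#         ...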
@@ -311,3 +313,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fp.write(f"{str(token)} \n") return out_vocab_file, out_monolingual_vocab_file + + +__all__ = ["BartphoTokenizer"] diff --git a/src/transformers/models/beit/__init__.py b/src/transformers/models/beit/__init__.py index c2f49240d6e6..1e2e666129d4 100644 --- a/src/transformers/models/beit/__init__.py +++ b/src/transformers/models/beit/__init__.py @@ -11,100 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = {"configuration_beit": ["BeitConfig", "BeitOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"] - _import_structure["image_processing_beit"] = ["BeitImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_beit"] = [ - "BeitForImageClassification", - "BeitForMaskedImageModeling", - "BeitForSemanticSegmentation", - "BeitModel", - "BeitPreTrainedModel", - "BeitBackbone", - ] - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_beit"] = [ - "FlaxBeitForImageClassification", - "FlaxBeitForMaskedImageModeling", - "FlaxBeitModel", - "FlaxBeitPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_beit import BeitConfig, BeitOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_beit import BeitFeatureExtractor - from .image_processing_beit import BeitImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_beit import ( - BeitBackbone, - BeitForImageClassification, - BeitForMaskedImageModeling, - BeitForSemanticSegmentation, - BeitModel, - BeitPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_beit import ( - FlaxBeitForImageClassification, - FlaxBeitForMaskedImageModeling, - FlaxBeitModel, - FlaxBeitPreTrainedModel, - ) - - + from .configuration_beit import * + from .feature_extraction_beit import * + from .image_processing_beit import * + from .modeling_beit import * + from .modeling_flax_beit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index f0f3c2582c35..834988258c6b 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ 
b/src/transformers/models/beit/configuration_beit.py @@ -224,3 +224,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["BeitConfig", "BeitOnnxConfig"] diff --git a/src/transformers/models/beit/feature_extraction_beit.py b/src/transformers/models/beit/feature_extraction_beit.py index 59dacb4ae51f..dd82f1794a13 100644 --- a/src/transformers/models/beit/feature_extraction_beit.py +++ b/src/transformers/models/beit/feature_extraction_beit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_beit import BeitImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class BeitFeatureExtractor(BeitImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["BeitFeatureExtractor"] diff --git a/src/transformers/models/beit/image_processing_beit.py b/src/transformers/models/beit/image_processing_beit.py index 7398381b2229..6d562336e7cf 100644 --- a/src/transformers/models/beit/image_processing_beit.py +++ b/src/transformers/models/beit/image_processing_beit.py @@ -42,6 +42,7 @@ logging, ) from ...utils.deprecation import deprecate_kwarg +from ...utils.import_utils import export if is_vision_available(): @@ -54,6 +55,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class BeitImageProcessor(BaseImageProcessor): r""" Constructs a BEiT image processor. @@ -510,3 +512,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["BeitImageProcessor"] diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index f972e021f3e2..a2488a9414f5 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -1576,3 +1576,13 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "BeitPreTrainedModel", + "BeitModel", + "BeitForMaskedImageModeling", + "BeitForImageClassification", + "BeitForSemanticSegmentation", + "BeitBackbone", +] diff --git a/src/transformers/models/beit/modeling_flax_beit.py b/src/transformers/models/beit/modeling_flax_beit.py index c1da64d263a2..d3cb22defc77 100644 --- a/src/transformers/models/beit/modeling_flax_beit.py +++ b/src/transformers/models/beit/modeling_flax_beit.py @@ -946,3 +946,10 @@ class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel): append_replace_return_docstrings( FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig ) + +__all__ = [ + "FlaxBeitPreTrainedModel", + "FlaxBeitModel", + "FlaxBeitForMaskedImageModeling", + "FlaxBeitForImageClassification", +] diff --git a/src/transformers/models/bert/__init__.py b/src/transformers/models/bert/__init__.py index 17048a5d1c96..680213297b15 100644 --- a/src/transformers/models/bert/__init__.py +++ b/src/transformers/models/bert/__init__.py @@ -11,183 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
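# The `__all__` lists appended throughout this patch are what make the
# `if TYPE_CHECKING: from .modeling_x import *` pattern safe: a star import
# binds only the names a module lists in `__all__`. A small self-contained
# example with hypothetical names:

# modeling_toy.py
class ToyModel:
    """Public: listed in __all__, so `from modeling_toy import *` exports it."""


class ToyAttentionHelper:
    """Internal: omitted from __all__, so a star import leaves it private."""


__all__ = ["ToyModel"]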
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tensorflow_text_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_bert": ["BertConfig", "BertOnnxConfig"], - "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bert"] = [ - "BertForMaskedLM", - "BertForMultipleChoice", - "BertForNextSentencePrediction", - "BertForPreTraining", - "BertForQuestionAnswering", - "BertForSequenceClassification", - "BertForTokenClassification", - "BertLayer", - "BertLMHeadModel", - "BertModel", - "BertPreTrainedModel", - "load_tf_weights_in_bert", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_bert"] = [ - "TFBertEmbeddings", - "TFBertForMaskedLM", - "TFBertForMultipleChoice", - "TFBertForNextSentencePrediction", - "TFBertForPreTraining", - "TFBertForQuestionAnswering", - "TFBertForSequenceClassification", - "TFBertForTokenClassification", - "TFBertLMHeadModel", - "TFBertMainLayer", - "TFBertModel", - "TFBertPreTrainedModel", - ] -try: - if not is_tensorflow_text_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_bert"] = [ - "FlaxBertForCausalLM", - "FlaxBertForMaskedLM", - "FlaxBertForMultipleChoice", - "FlaxBertForNextSentencePrediction", - "FlaxBertForPreTraining", - "FlaxBertForQuestionAnswering", - "FlaxBertForSequenceClassification", - "FlaxBertForTokenClassification", - "FlaxBertModel", - "FlaxBertPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_bert import BertConfig, BertOnnxConfig - from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bert_fast import BertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bert import ( - BertForMaskedLM, - BertForMultipleChoice, - BertForNextSentencePrediction, - BertForPreTraining, - BertForQuestionAnswering, - BertForSequenceClassification, - BertForTokenClassification, - BertLayer, - BertLMHeadModel, - BertModel, - BertPreTrainedModel, - load_tf_weights_in_bert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_bert import ( - TFBertEmbeddings, - TFBertForMaskedLM, - TFBertForMultipleChoice, - TFBertForNextSentencePrediction, - TFBertForPreTraining, - TFBertForQuestionAnswering, - 
TFBertForSequenceClassification, - TFBertForTokenClassification, - TFBertLMHeadModel, - TFBertMainLayer, - TFBertModel, - TFBertPreTrainedModel, - ) - - try: - if not is_tensorflow_text_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bert_tf import TFBertTokenizer - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_bert import ( - FlaxBertForCausalLM, - FlaxBertForMaskedLM, - FlaxBertForMultipleChoice, - FlaxBertForNextSentencePrediction, - FlaxBertForPreTraining, - FlaxBertForQuestionAnswering, - FlaxBertForSequenceClassification, - FlaxBertForTokenClassification, - FlaxBertModel, - FlaxBertPreTrainedModel, - ) - + from .configuration_bert import * + from .modeling_bert import * + from .modeling_flax_bert import * + from .modeling_tf_bert import * + from .tokenization_bert import * + from .tokenization_bert_fast import * + from .tokenization_bert_tf import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bert/configuration_bert.py b/src/transformers/models/bert/configuration_bert.py index 613cf6a11463..ea29fb81c435 100644 --- a/src/transformers/models/bert/configuration_bert.py +++ b/src/transformers/models/bert/configuration_bert.py @@ -149,3 +149,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["BertConfig", "BertOnnxConfig"] diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index 850e93ca59fb..ca2b7910708d 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -2021,3 +2021,18 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_bert", + "BertPreTrainedModel", + "BertModel", + "BertForPreTraining", + "BertLMHeadModel", + "BertForMaskedLM", + "BertForNextSentencePrediction", + "BertForSequenceClassification", + "BertForMultipleChoice", + "BertForTokenClassification", + "BertForQuestionAnswering", +] diff --git a/src/transformers/models/bert/modeling_flax_bert.py b/src/transformers/models/bert/modeling_flax_bert.py index 772ea2bf12b2..99fd0dd23f14 100644 --- a/src/transformers/models/bert/modeling_flax_bert.py +++ b/src/transformers/models/bert/modeling_flax_bert.py @@ -1711,3 +1711,16 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxBertPreTrainedModel", + "FlaxBertModel", + "FlaxBertForPreTraining", + "FlaxBertForMaskedLM", + "FlaxBertForNextSentencePrediction", + "FlaxBertForSequenceClassification", + "FlaxBertForMultipleChoice", + "FlaxBertForTokenClassification", + "FlaxBertForQuestionAnswering", + "FlaxBertForCausalLM", +] diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index bb3281278ada..febe6360232e 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -2108,3 +2108,18 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with 
tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFBertPreTrainedModel", + "TFBertModel", + "TFBertForPreTraining", + "TFBertForMaskedLM", + "TFBertLMHeadModel", + "TFBertForNextSentencePrediction", + "TFBertForSequenceClassification", + "TFBertForMultipleChoice", + "TFBertForTokenClassification", + "TFBertForQuestionAnswering", + "TFBertMainLayer", +] diff --git a/src/transformers/models/bert/tokenization_bert.py b/src/transformers/models/bert/tokenization_bert.py index cd70e38d008a..383c4712bd04 100644 --- a/src/transformers/models/bert/tokenization_bert.py +++ b/src/transformers/models/bert/tokenization_bert.py @@ -497,3 +497,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["BertTokenizer", "BasicTokenizer", "WordpieceTokenizer"] diff --git a/src/transformers/models/bert/tokenization_bert_fast.py b/src/transformers/models/bert/tokenization_bert_fast.py index f48977728470..4a89e6053b98 100644 --- a/src/transformers/models/bert/tokenization_bert_fast.py +++ b/src/transformers/models/bert/tokenization_bert_fast.py @@ -170,3 +170,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["BertTokenizerFast"] diff --git a/src/transformers/models/bert/tokenization_bert_tf.py b/src/transformers/models/bert/tokenization_bert_tf.py index ebf88eeac9bb..5fc1e75306d9 100644 --- a/src/transformers/models/bert/tokenization_bert_tf.py +++ b/src/transformers/models/bert/tokenization_bert_tf.py @@ -6,9 +6,11 @@ from tensorflow_text import FastBertTokenizer, ShrinkLongestTrimmer, case_fold_utf8, combine_segments, pad_model_inputs from ...modeling_tf_utils import keras +from ...utils.import_utils import export from .tokenization_bert import BertTokenizer +@export(backends=("tf",)) class TFBertTokenizer(keras.layers.Layer): """ This is an in-graph tokenizer for BERT. It should be initialized similarly to other tokenizers, using the @@ -252,3 +254,6 @@ def get_config(self): "sep_token_id": self.sep_token_id, "pad_token_id": self.pad_token_id, } + + +__all__ = ["TFBertTokenizer"] diff --git a/src/transformers/models/bert_generation/__init__.py b/src/transformers/models/bert_generation/__init__.py index 14cf8bb58793..75017d26c5b6 100644 --- a/src/transformers/models/bert_generation/__init__.py +++ b/src/transformers/models/bert_generation/__init__.py @@ -11,61 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
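With the tokenization, configuration and modeling files each declaring `__all__`, `bert/__init__.py` collapses to the stub shown above: star-imports for type checkers, `_LazyModule` plus `define_import_structure(_file)` at runtime. The lazy lookup itself works roughly as in this simplified sketch (the real `_LazyModule` additionally enforces backend availability and supports `dir()`):

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    # Simplified stand-in for _LazyModule: map every public name to the
    # submodule defining it, and import that submodule on first access.
    def __init__(self, name, structure):
        super().__init__(name)
        # structure e.g. {"configuration_bert": {"BertConfig", "BertOnnxConfig"}}
        self._name_to_submodule = {
            symbol: submodule for submodule, symbols in structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr == "_name_to_submodule" or attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_submodule[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache: later lookups skip __getattr__
        return value
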
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available - - -_import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bert_generation"] = [ - "BertGenerationDecoder", - "BertGenerationEncoder", - "BertGenerationPreTrainedModel", - "load_tf_weights_in_bert_generation", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bert_generation import BertGenerationConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bert_generation import BertGenerationTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bert_generation import ( - BertGenerationDecoder, - BertGenerationEncoder, - BertGenerationPreTrainedModel, - load_tf_weights_in_bert_generation, - ) - + from .configuration_bert_generation import * + from .modeling_bert_generation import * + from .tokenization_bert_generation import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bert_generation/configuration_bert_generation.py b/src/transformers/models/bert_generation/configuration_bert_generation.py index d1d1b51b6538..1abe7c1a1c44 100644 --- a/src/transformers/models/bert_generation/configuration_bert_generation.py +++ b/src/transformers/models/bert_generation/configuration_bert_generation.py @@ -122,3 +122,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache + + +__all__ = ["BertGenerationConfig"] diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index a5fb3d053115..275afcfdfcd4 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -1018,3 +1018,11 @@ def _reorder_cache(self, past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "load_tf_weights_in_bert_generation", + "BertGenerationPreTrainedModel", + "BertGenerationEncoder", + "BertGenerationDecoder", +] diff --git a/src/transformers/models/bert_generation/tokenization_bert_generation.py b/src/transformers/models/bert_generation/tokenization_bert_generation.py index b1adb9b62b25..3c4d03e37112 100644 --- a/src/transformers/models/bert_generation/tokenization_bert_generation.py +++ b/src/transformers/models/bert_generation/tokenization_bert_generation.py @@ -22,6 +22,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils 
import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -29,6 +30,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} +@export(backends=("sentencepiece",)) class BertGenerationTokenizer(PreTrainedTokenizer): """ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -170,3 +172,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["BertGenerationTokenizer"] diff --git a/src/transformers/models/bert_japanese/__init__.py b/src/transformers/models/bert_japanese/__init__.py index a569c3cc54bf..7e3bd4fc2ca1 100644 --- a/src/transformers/models/bert_japanese/__init__.py +++ b/src/transformers/models/bert_japanese/__init__.py @@ -11,19 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer - + from .tokenization_bert_japanese import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bert_japanese/tokenization_bert_japanese.py b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py index 10d71c417a7a..fa5067596c64 100644 --- a/src/transformers/models/bert_japanese/tokenization_bert_japanese.py +++ b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py @@ -977,3 +977,6 @@ def tokenize(self, text): new_pieces.append(piece) return new_pieces + + +__all__ = ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"] diff --git a/src/transformers/models/bertweet/__init__.py b/src/transformers/models/bertweet/__init__.py index 42e4a23337c2..c574df06c143 100644 --- a/src/transformers/models/bertweet/__init__.py +++ b/src/transformers/models/bertweet/__init__.py @@ -11,19 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
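Packages with a single tokenizer file, like `bert_japanese` above and `bertweet` below, show the machinery at its smallest: the only input `define_import_structure` has is the sibling modules' `__all__` lists (plus any `@export` markers). A toy version of that scan, assuming the real one parses source with `ast` rather than importing it so that laziness is preserved; backend handling is omitted here:

import ast
from pathlib import Path


def toy_define_import_structure(init_file: str) -> dict:
    # Collect {submodule name: set of exported symbols} for every sibling
    # module, without executing any of them.
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        for node in ast.parse(path.read_text(encoding="utf-8")).body:
            is_all = isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__" for target in node.targets
            )
            if is_all:
                structure[path.stem] = set(ast.literal_eval(node.value))
    return structure
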
- from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_bertweet import BertweetTokenizer - + from .tokenization_bertweet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bertweet/tokenization_bertweet.py b/src/transformers/models/bertweet/tokenization_bertweet.py index f478dd0832b6..2d5f5895e831 100644 --- a/src/transformers/models/bertweet/tokenization_bertweet.py +++ b/src/transformers/models/bertweet/tokenization_bertweet.py @@ -764,3 +764,5 @@ def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=Fa ############################################################################### + +__all__ = ["BertweetTokenizer"] diff --git a/src/transformers/models/big_bird/__init__.py b/src/transformers/models/big_bird/__init__.py index 8eda33d9ee66..fb64fdcc6c69 100644 --- a/src/transformers/models/big_bird/__init__.py +++ b/src/transformers/models/big_bird/__init__.py @@ -13,133 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_big_bird": ["BigBirdConfig", "BigBirdOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_big_bird"] = ["BigBirdTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_big_bird_fast"] = ["BigBirdTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_big_bird"] = [ - "BigBirdForCausalLM", - "BigBirdForMaskedLM", - "BigBirdForMultipleChoice", - "BigBirdForPreTraining", - "BigBirdForQuestionAnswering", - "BigBirdForSequenceClassification", - "BigBirdForTokenClassification", - "BigBirdLayer", - "BigBirdModel", - "BigBirdPreTrainedModel", - "load_tf_weights_in_big_bird", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_big_bird"] = [ - "FlaxBigBirdForCausalLM", - "FlaxBigBirdForMaskedLM", - "FlaxBigBirdForMultipleChoice", - "FlaxBigBirdForPreTraining", - "FlaxBigBirdForQuestionAnswering", - "FlaxBigBirdForSequenceClassification", - "FlaxBigBirdForTokenClassification", - "FlaxBigBirdModel", - "FlaxBigBirdPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_big_bird import BigBirdConfig, BigBirdOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_big_bird import BigBirdTokenizer - - try: - if not is_tokenizers_available(): - 
raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_big_bird_fast import BigBirdTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_big_bird import ( - BigBirdForCausalLM, - BigBirdForMaskedLM, - BigBirdForMultipleChoice, - BigBirdForPreTraining, - BigBirdForQuestionAnswering, - BigBirdForSequenceClassification, - BigBirdForTokenClassification, - BigBirdLayer, - BigBirdModel, - BigBirdPreTrainedModel, - load_tf_weights_in_big_bird, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_big_bird import ( - FlaxBigBirdForCausalLM, - FlaxBigBirdForMaskedLM, - FlaxBigBirdForMultipleChoice, - FlaxBigBirdForPreTraining, - FlaxBigBirdForQuestionAnswering, - FlaxBigBirdForSequenceClassification, - FlaxBigBirdForTokenClassification, - FlaxBigBirdModel, - FlaxBigBirdPreTrainedModel, - ) - + from .configuration_big_bird import * + from .modeling_big_bird import * + from .modeling_flax_big_bird import * + from .tokenization_big_bird import * + from .tokenization_big_bird_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/big_bird/configuration_big_bird.py b/src/transformers/models/big_bird/configuration_big_bird.py index cbcf2e6bf57f..1019e008aa3b 100644 --- a/src/transformers/models/big_bird/configuration_big_bird.py +++ b/src/transformers/models/big_bird/configuration_big_bird.py @@ -171,3 +171,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["BigBirdConfig", "BigBirdOnnxConfig"] diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index a6b1660d5ae1..45de7352ff9c 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -3147,3 +3147,17 @@ def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): mask.unsqueeze_(0) # -> (1, maxlen) mask = torch.where(mask < q_lengths, 1, 0) return mask + + +__all__ = [ + "load_tf_weights_in_big_bird", + "BigBirdPreTrainedModel", + "BigBirdModel", + "BigBirdForPreTraining", + "BigBirdForMaskedLM", + "BigBirdForCausalLM", + "BigBirdForSequenceClassification", + "BigBirdForMultipleChoice", + "BigBirdForTokenClassification", + "BigBirdForQuestionAnswering", +] diff --git a/src/transformers/models/big_bird/modeling_flax_big_bird.py b/src/transformers/models/big_bird/modeling_flax_big_bird.py index 94eabdec451d..901f5de9e5fc 100644 --- a/src/transformers/models/big_bird/modeling_flax_big_bird.py +++ b/src/transformers/models/big_bird/modeling_flax_big_bird.py @@ -2633,3 +2633,15 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxBigBirdPreTrainedModel", + "FlaxBigBirdModel", + "FlaxBigBirdForPreTraining", + "FlaxBigBirdForMaskedLM", + "FlaxBigBirdForSequenceClassification", + "FlaxBigBirdForMultipleChoice", + "FlaxBigBirdForTokenClassification", + "FlaxBigBirdForQuestionAnswering", + "FlaxBigBirdForCausalLM", +] diff 
--git a/src/transformers/models/big_bird/tokenization_big_bird.py b/src/transformers/models/big_bird/tokenization_big_bird.py index e435477ef3c6..ae7664038fb7 100644 --- a/src/transformers/models/big_bird/tokenization_big_bird.py +++ b/src/transformers/models/big_bird/tokenization_big_bird.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -30,6 +31,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} +@export(backends=("sentencepiece",)) class BigBirdTokenizer(PreTrainedTokenizer): """ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -319,3 +321,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + +__all__ = ["BigBirdTokenizer"] diff --git a/src/transformers/models/big_bird/tokenization_big_bird_fast.py b/src/transformers/models/big_bird/tokenization_big_bird_fast.py index f4ccbb8b1797..83f2fac07fae 100644 --- a/src/transformers/models/big_bird/tokenization_big_bird_fast.py +++ b/src/transformers/models/big_bird/tokenization_big_bird_fast.py @@ -227,3 +227,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["BigBirdTokenizerFast"] diff --git a/src/transformers/models/bigbird_pegasus/__init__.py b/src/transformers/models/bigbird_pegasus/__init__.py index 85621ce76d90..d37a14d7fce7 100644 --- a/src/transformers/models/bigbird_pegasus/__init__.py +++ b/src/transformers/models/bigbird_pegasus/__init__.py @@ -13,55 +13,15 @@ # limitations under the License. 
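Every try/except `OptionalDependencyNotAvailable` block deleted above (sentencepiece, tokenizers, torch and flax for BigBird alone) encoded the same rule: expose a symbol only when its backend is installed. Centralized, that rule can be as small as the sketch below; the backend-to-package mapping shown is illustrative, not the library's actual table, and the authoritative checks remain `is_torch_available()` and friends:

import importlib.util

# Illustrative mapping from backend names used in this patch to importable
# top-level packages.
_PROBE = {
    "torch": "torch",
    "tf": "tensorflow",
    "flax": "flax",
    "vision": "PIL",
    "sentencepiece": "sentencepiece",
    "tokenizers": "tokenizers",
}


def backends_available(backends) -> bool:
    """Return True when every required backend is importable."""
    return all(importlib.util.find_spec(_PROBE[b]) is not None for b in backends)
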
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_bigbird_pegasus": [ - "BigBirdPegasusConfig", - "BigBirdPegasusOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bigbird_pegasus"] = [ - "BigBirdPegasusForCausalLM", - "BigBirdPegasusForConditionalGeneration", - "BigBirdPegasusForQuestionAnswering", - "BigBirdPegasusForSequenceClassification", - "BigBirdPegasusModel", - "BigBirdPegasusPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bigbird_pegasus import ( - BigBirdPegasusConfig, - BigBirdPegasusOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bigbird_pegasus import ( - BigBirdPegasusForCausalLM, - BigBirdPegasusForConditionalGeneration, - BigBirdPegasusForQuestionAnswering, - BigBirdPegasusForSequenceClassification, - BigBirdPegasusModel, - BigBirdPegasusPreTrainedModel, - ) - - + from .configuration_bigbird_pegasus import * + from .modeling_bigbird_pegasus import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py index 9de2a7267acb..b9c3986c9d3c 100644 --- a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py @@ -17,10 +17,10 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -407,3 +407,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + +__all__ = ["BigBirdPegasusConfig", "BigBirdPegasusOnnxConfig"] diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index 9f8e3cd19cd8..5a5c7b7db6c9 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -3083,3 +3083,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "BigBirdPegasusPreTrainedModel", + "BigBirdPegasusModel", + "BigBirdPegasusForConditionalGeneration", + "BigBirdPegasusForSequenceClassification", + "BigBirdPegasusForQuestionAnswering", + "BigBirdPegasusForCausalLM", +] diff --git a/src/transformers/models/biogpt/__init__.py b/src/transformers/models/biogpt/__init__.py index 355c87e67ba2..6bb2eea1b5bd 100644 --- a/src/transformers/models/biogpt/__init__.py +++ b/src/transformers/models/biogpt/__init__.py @@ -13,49 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_biogpt": ["BioGptConfig"], - "tokenization_biogpt": ["BioGptTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_biogpt"] = [ - "BioGptForCausalLM", - "BioGptForTokenClassification", - "BioGptForSequenceClassification", - "BioGptModel", - "BioGptPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_biogpt import BioGptConfig - from .tokenization_biogpt import BioGptTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_biogpt import ( - BioGptForCausalLM, - BioGptForSequenceClassification, - BioGptForTokenClassification, - BioGptModel, - BioGptPreTrainedModel, - ) - - + from .configuration_biogpt import * + from .modeling_biogpt import * + from .tokenization_biogpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/biogpt/configuration_biogpt.py b/src/transformers/models/biogpt/configuration_biogpt.py index 18f7b6d6bf06..b338092edd1d 100644 --- a/src/transformers/models/biogpt/configuration_biogpt.py +++ b/src/transformers/models/biogpt/configuration_biogpt.py @@ -129,3 +129,6 @@ def __init__( self.layerdrop = layerdrop self.activation_dropout = activation_dropout 
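# The __all__ lists introduced throughout this patch must mirror exactly what
# the deleted _import_structure dictionaries exposed. A throwaway check for
# one module during review (assumes transformers and torch are installed):
import importlib

expected = {  # copied from the old _import_structure["modeling_biogpt"]
    "BioGptForCausalLM",
    "BioGptForTokenClassification",
    "BioGptForSequenceClassification",
    "BioGptModel",
    "BioGptPreTrainedModel",
}
module = importlib.import_module("transformers.models.biogpt.modeling_biogpt")
assert set(module.__all__) == expected, set(module.__all__) ^ expected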
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["BioGptConfig"] diff --git a/src/transformers/models/biogpt/modeling_biogpt.py b/src/transformers/models/biogpt/modeling_biogpt.py index 020f52833d5b..c521824a035e 100755 --- a/src/transformers/models/biogpt/modeling_biogpt.py +++ b/src/transformers/models/biogpt/modeling_biogpt.py @@ -934,3 +934,12 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.biogpt.embed_tokens = value + + +__all__ = [ + "BioGptPreTrainedModel", + "BioGptModel", + "BioGptForCausalLM", + "BioGptForTokenClassification", + "BioGptForSequenceClassification", +] diff --git a/src/transformers/models/biogpt/tokenization_biogpt.py b/src/transformers/models/biogpt/tokenization_biogpt.py index f9760eb604e7..a898976d985f 100644 --- a/src/transformers/models/biogpt/tokenization_biogpt.py +++ b/src/transformers/models/biogpt/tokenization_biogpt.py @@ -356,3 +356,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["BioGptTokenizer"] diff --git a/src/transformers/models/bit/__init__.py b/src/transformers/models/bit/__init__.py index 8f298a9adf65..48cbe0a59b2d 100644 --- a/src/transformers/models/bit/__init__.py +++ b/src/transformers/models/bit/__init__.py @@ -13,59 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_bit": ["BitConfig", "BitOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bit"] = [ - "BitForImageClassification", - "BitModel", - "BitPreTrainedModel", - "BitBackbone", - ] - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_bit"] = ["BitImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bit import BitConfig, BitOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bit import ( - BitBackbone, - BitForImageClassification, - BitModel, - BitPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_bit import BitImageProcessor - + from .configuration_bit import * + from .image_processing_bit import * + from .modeling_bit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bit/configuration_bit.py b/src/transformers/models/bit/configuration_bit.py index 8f4326a2d5a7..238749f1fbe7 100644 --- a/src/transformers/models/bit/configuration_bit.py +++ b/src/transformers/models/bit/configuration_bit.py @@ -131,3 +131,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["BitConfig"] diff --git 
a/src/transformers/models/bit/image_processing_bit.py b/src/transformers/models/bit/image_processing_bit.py index ba2340789970..e238c553c683 100644 --- a/src/transformers/models/bit/image_processing_bit.py +++ b/src/transformers/models/bit/image_processing_bit.py @@ -39,6 +39,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -48,6 +49,7 @@ import PIL +@export(backends=("vision",)) class BitImageProcessor(BaseImageProcessor): r""" Constructs a BiT image processor. @@ -319,3 +321,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["BitImageProcessor"] diff --git a/src/transformers/models/bit/modeling_bit.py b/src/transformers/models/bit/modeling_bit.py index 3c7e4c57b2f1..0cc1a21a9fdb 100644 --- a/src/transformers/models/bit/modeling_bit.py +++ b/src/transformers/models/bit/modeling_bit.py @@ -901,3 +901,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["BitPreTrainedModel", "BitModel", "BitForImageClassification", "BitBackbone"] diff --git a/src/transformers/models/blenderbot/__init__.py b/src/transformers/models/blenderbot/__init__.py index 8b53b9100a4a..05584c93412e 100644 --- a/src/transformers/models/blenderbot/__init__.py +++ b/src/transformers/models/blenderbot/__init__.py @@ -11,128 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_blenderbot": [ - "BlenderbotConfig", - "BlenderbotOnnxConfig", - ], - "tokenization_blenderbot": ["BlenderbotTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_blenderbot"] = [ - "BlenderbotForCausalLM", - "BlenderbotForConditionalGeneration", - "BlenderbotModel", - "BlenderbotPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_blenderbot"] = [ - "TFBlenderbotForConditionalGeneration", - "TFBlenderbotModel", - "TFBlenderbotPreTrainedModel", - ] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_blenderbot"] = [ - "FlaxBlenderbotForConditionalGeneration", - "FlaxBlenderbotModel", - "FlaxBlenderbotPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_blenderbot import ( - BlenderbotConfig, - BlenderbotOnnxConfig, - ) - from .tokenization_blenderbot import BlenderbotTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except 
OptionalDependencyNotAvailable: - pass - else: - from .tokenization_blenderbot_fast import BlenderbotTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_blenderbot import ( - BlenderbotForCausalLM, - BlenderbotForConditionalGeneration, - BlenderbotModel, - BlenderbotPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_blenderbot import ( - TFBlenderbotForConditionalGeneration, - TFBlenderbotModel, - TFBlenderbotPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_blenderbot import ( - FlaxBlenderbotForConditionalGeneration, - FlaxBlenderbotModel, - FlaxBlenderbotPreTrainedModel, - ) - + from .configuration_blenderbot import * + from .modeling_blenderbot import * + from .modeling_flax_blenderbot import * + from .modeling_tf_blenderbot import * + from .tokenization_blenderbot import * + from .tokenization_blenderbot_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/blenderbot/configuration_blenderbot.py b/src/transformers/models/blenderbot/configuration_blenderbot.py index 105d38c25591..71f586044e7f 100644 --- a/src/transformers/models/blenderbot/configuration_blenderbot.py +++ b/src/transformers/models/blenderbot/configuration_blenderbot.py @@ -17,11 +17,11 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging @@ -390,3 +390,6 @@ def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} + + +__all__ = ["BlenderbotConfig", "BlenderbotOnnxConfig"] diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 12d259fde71e..45db7ee0ae74 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -1609,3 +1609,11 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "BlenderbotPreTrainedModel", + "BlenderbotModel", + "BlenderbotForConditionalGeneration", + "BlenderbotForCausalLM", +] diff --git a/src/transformers/models/blenderbot/modeling_flax_blenderbot.py b/src/transformers/models/blenderbot/modeling_flax_blenderbot.py index 97c9653da36d..b511c330f803 100644 --- a/src/transformers/models/blenderbot/modeling_flax_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_flax_blenderbot.py @@ -1503,3 +1503,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxBlenderbotForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = ["FlaxBlenderbotPreTrainedModel", "FlaxBlenderbotModel", "FlaxBlenderbotForConditionalGeneration"] diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index bbfe4726deef..25765ec1569a 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -1553,3 +1553,11 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = [ + "TFBlenderbotPreTrainedModel", + "TFBlenderbotModel", + "TFBlenderbotForConditionalGeneration", + "TFBlenderbotMainLayer", +] diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot.py b/src/transformers/models/blenderbot/tokenization_blenderbot.py index 1a8807214d52..08b2a8c1283b 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot.py @@ -405,3 +405,6 @@ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" return token_ids_0 + [self.eos_token_id] + + +__all__ = ["BlenderbotTokenizer"] diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py index 0d24ed62c574..f649246517d2 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py @@ -287,3 +287,6 @@ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ return token_ids_0 + [self.eos_token_id] + + +__all__ = ["BlenderbotTokenizerFast"] diff --git a/src/transformers/models/blenderbot_small/__init__.py b/src/transformers/models/blenderbot_small/__init__.py index e6cab05c0cae..1c60cc772399 100644 --- a/src/transformers/models/blenderbot_small/__init__.py +++ b/src/transformers/models/blenderbot_small/__init__.py @@ -13,122 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_blenderbot_small": [ - "BlenderbotSmallConfig", - "BlenderbotSmallOnnxConfig", - ], - "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_blenderbot_small"] = [ - "BlenderbotSmallForCausalLM", - "BlenderbotSmallForConditionalGeneration", - "BlenderbotSmallModel", - "BlenderbotSmallPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_blenderbot_small"] = [ - "TFBlenderbotSmallForConditionalGeneration", - "TFBlenderbotSmallModel", - "TFBlenderbotSmallPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_blenderbot_small"] = [ - "FlaxBlenderbotSmallForConditionalGeneration", - "FlaxBlenderbotSmallModel", - "FlaxBlenderbotSmallPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_blenderbot_small import ( - BlenderbotSmallConfig, - BlenderbotSmallOnnxConfig, - ) - from .tokenization_blenderbot_small import BlenderbotSmallTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_blenderbot_small import ( - BlenderbotSmallForCausalLM, - BlenderbotSmallForConditionalGeneration, - BlenderbotSmallModel, - BlenderbotSmallPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass 
- else: - from .modeling_tf_blenderbot_small import ( - TFBlenderbotSmallForConditionalGeneration, - TFBlenderbotSmallModel, - TFBlenderbotSmallPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_blenderbot_small import ( - FlaxBlenderbotSmallForConditionalGeneration, - FlaxBlenderbotSmallModel, - FlaxBlenderbotSmallPreTrainedModel, - ) - + from .configuration_blenderbot_small import * + from .modeling_blenderbot_small import * + from .modeling_flax_blenderbot_small import * + from .modeling_tf_blenderbot_small import * + from .tokenization_blenderbot_small import * + from .tokenization_blenderbot_small_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py index 6ee26365de8d..39ccdb8e685b 100644 --- a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py @@ -17,11 +17,11 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging @@ -385,3 +385,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + +__all__ = ["BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig"] diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index aa0e38bd8e91..67a8aee42114 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -1561,3 +1561,11 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "BlenderbotSmallPreTrainedModel", + "BlenderbotSmallModel", + "BlenderbotSmallForConditionalGeneration", + "BlenderbotSmallForCausalLM", +] diff --git a/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py index 325ff0a20b55..68c001fc9eef 100644 --- a/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py @@ -1519,3 +1519,9 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = [ + "FlaxBlenderbotSmallPreTrainedModel", + "FlaxBlenderbotSmallModel", + 
"FlaxBlenderbotSmallForConditionalGeneration", +] diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 157646297990..067d05f7d16b 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -1523,3 +1523,11 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = [ + "TFBlenderbotSmallPreTrainedModel", + "TFBlenderbotSmallModel", + "TFBlenderbotSmallForConditionalGeneration", + "TFBlenderbotSmallMainLayer", +] diff --git a/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py b/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py index 08c7be332e31..be950f0dbe62 100644 --- a/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py @@ -217,3 +217,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return vocab_file, merge_file + + +__all__ = ["BlenderbotSmallTokenizer"] diff --git a/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py b/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py index 21fb76cbfc86..ac98ce008baa 100644 --- a/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py +++ b/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py @@ -98,3 +98,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["BlenderbotSmallTokenizerFast"] diff --git a/src/transformers/models/blip/__init__.py b/src/transformers/models/blip/__init__.py index f78c2500bd64..b3c2cccca2ff 100644 --- a/src/transformers/models/blip/__init__.py +++ b/src/transformers/models/blip/__init__.py @@ -13,110 +13,20 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_blip": [ - "BlipConfig", - "BlipTextConfig", - "BlipVisionConfig", - ], - "processing_blip": ["BlipProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_blip"] = ["BlipImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_blip"] = [ - "BlipModel", - "BlipPreTrainedModel", - "BlipForConditionalGeneration", - "BlipForQuestionAnswering", - "BlipVisionModel", - "BlipTextModel", - "BlipForImageTextRetrieval", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_blip"] = [ - "TFBlipModel", - "TFBlipPreTrainedModel", - "TFBlipForConditionalGeneration", - "TFBlipForQuestionAnswering", - "TFBlipVisionModel", - "TFBlipTextModel", - "TFBlipForImageTextRetrieval", - ] - if TYPE_CHECKING: - from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig - from .processing_blip import BlipProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_blip import BlipImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_blip import ( - BlipForConditionalGeneration, - BlipForImageTextRetrieval, - BlipForQuestionAnswering, - BlipModel, - BlipPreTrainedModel, - BlipTextModel, - BlipVisionModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_blip import ( - TFBlipForConditionalGeneration, - TFBlipForImageTextRetrieval, - TFBlipForQuestionAnswering, - TFBlipModel, - TFBlipPreTrainedModel, - TFBlipTextModel, - TFBlipVisionModel, - ) - + from .configuration_blip import * + from .image_processing_blip import * + from .modeling_blip import * + from .modeling_blip_text import * + from .modeling_tf_blip import * + from .modeling_tf_blip_text import * + from .processing_blip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/blip/configuration_blip.py b/src/transformers/models/blip/configuration_blip.py index 4772738be103..913bae71a94d 100644 --- a/src/transformers/models/blip/configuration_blip.py +++ b/src/transformers/models/blip/configuration_blip.py @@ -360,3 +360,6 @@ def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: Bl """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["BlipTextConfig", "BlipVisionConfig", "BlipConfig"] diff --git a/src/transformers/models/blip/image_processing_blip.py b/src/transformers/models/blip/image_processing_blip.py 
index 6f520f9fb9cb..b8471a9e102e 100644 --- a/src/transformers/models/blip/image_processing_blip.py +++ b/src/transformers/models/blip/image_processing_blip.py @@ -34,6 +34,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -43,6 +44,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class BlipImageProcessor(BaseImageProcessor): r""" Constructs a BLIP image processor. @@ -292,3 +294,6 @@ def preprocess( encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs + + +__all__ = ["BlipImageProcessor"] diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index 2392961037f2..06ad3c8e1aef 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -1563,3 +1563,13 @@ def forward( attentions=vision_outputs.attentions, question_embeds=question_embeds, ) + + +__all__ = [ + "BlipPreTrainedModel", + "BlipVisionModel", + "BlipModel", + "BlipForConditionalGeneration", + "BlipForQuestionAnswering", + "BlipForImageTextRetrieval", +] diff --git a/src/transformers/models/blip/modeling_blip_text.py b/src/transformers/models/blip/modeling_blip_text.py index a800ba89825d..716bf9175801 100644 --- a/src/transformers/models/blip/modeling_blip_text.py +++ b/src/transformers/models/blip/modeling_blip_text.py @@ -568,6 +568,8 @@ def _init_weights(self, module): # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571 + + class BlipTextModel(BlipTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of @@ -948,3 +950,6 @@ def _reorder_cache(self, past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["BlipTextModel"] diff --git a/src/transformers/models/blip/modeling_tf_blip.py b/src/transformers/models/blip/modeling_tf_blip.py index 6c9942b73ace..8980c8543517 100644 --- a/src/transformers/models/blip/modeling_tf_blip.py +++ b/src/transformers/models/blip/modeling_tf_blip.py @@ -1696,3 +1696,14 @@ def build(self, input_shape=None): if getattr(self, "itm_head", None) is not None: with tf.name_scope(self.itm_head.name): self.itm_head.build([None, None, self.config.text_config.hidden_size]) + + +__all__ = [ + "TFBlipPreTrainedModel", + "TFBlipVisionModel", + "TFBlipModel", + "TFBlipForConditionalGeneration", + "TFBlipForQuestionAnswering", + "TFBlipForImageTextRetrieval", + "TFBlipMainLayer", +] diff --git a/src/transformers/models/blip/modeling_tf_blip_text.py b/src/transformers/models/blip/modeling_tf_blip_text.py index b605a25eeb4b..432d1c367381 100644 --- a/src/transformers/models/blip/modeling_tf_blip_text.py +++ b/src/transformers/models/blip/modeling_tf_blip_text.py @@ -726,6 +726,8 @@ class TFBlipTextPreTrainedModel(TFPreTrainedModel): # Adapted from https://github.com/salesforce/BLIP/blob/3a29b7410476bf5f2ba0955827390eb6ea1f4f9d/models/med.py#L571 + + class TFBlipTextModel(TFBlipTextPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of @@ -1120,3 +1122,6 @@ def build(self, input_shape=None): if getattr(self, "cls", None) is not None: with 
tf.name_scope(self.cls.name): self.cls.build(None) + + +__all__ = ["TFBlipTextModel"] diff --git a/src/transformers/models/blip/processing_blip.py b/src/transformers/models/blip/processing_blip.py index cd96b46ab1d2..1f9e57f14414 100644 --- a/src/transformers/models/blip/processing_blip.py +++ b/src/transformers/models/blip/processing_blip.py @@ -149,3 +149,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["BlipProcessor"] diff --git a/src/transformers/models/blip_2/__init__.py b/src/transformers/models/blip_2/__init__.py index 329ddfe19ac6..c77be0b53f62 100644 --- a/src/transformers/models/blip_2/__init__.py +++ b/src/transformers/models/blip_2/__init__.py @@ -13,61 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_blip_2": [ - "Blip2Config", - "Blip2QFormerConfig", - "Blip2VisionConfig", - ], - "processing_blip_2": ["Blip2Processor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_blip_2"] = [ - "Blip2Model", - "Blip2VisionModelWithProjection", - "Blip2QFormerModel", - "Blip2PreTrainedModel", - "Blip2ForConditionalGeneration", - "Blip2ForImageTextRetrieval", - "Blip2VisionModel", - "Blip2TextModelWithProjection", - ] - if TYPE_CHECKING: - from .configuration_blip_2 import ( - Blip2Config, - Blip2QFormerConfig, - Blip2VisionConfig, - ) - from .processing_blip_2 import Blip2Processor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_blip_2 import ( - Blip2ForConditionalGeneration, - Blip2ForImageTextRetrieval, - Blip2Model, - Blip2PreTrainedModel, - Blip2QFormerModel, - Blip2TextModelWithProjection, - Blip2VisionModel, - Blip2VisionModelWithProjection, - ) - + from .configuration_blip_2 import * + from .modeling_blip_2 import * + from .processing_blip_2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/blip_2/configuration_blip_2.py b/src/transformers/models/blip_2/configuration_blip_2.py index 16fa4aec3849..050ae4b7746e 100644 --- a/src/transformers/models/blip_2/configuration_blip_2.py +++ b/src/transformers/models/blip_2/configuration_blip_2.py @@ -377,3 +377,6 @@ def from_vision_qformer_text_configs( text_config=text_config.to_dict() if text_config is not None else None, **kwargs, ) + + +__all__ = ["Blip2VisionConfig", "Blip2QFormerConfig", "Blip2Config"] diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index 8c3b5254ea8b..f1b65ef402ac 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -2537,3 +2537,15 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = [ + "Blip2PreTrainedModel", 
+ "Blip2VisionModel", + "Blip2QFormerModel", + "Blip2Model", + "Blip2ForConditionalGeneration", + "Blip2ForImageTextRetrieval", + "Blip2TextModelWithProjection", + "Blip2VisionModelWithProjection", +] diff --git a/src/transformers/models/blip_2/processing_blip_2.py b/src/transformers/models/blip_2/processing_blip_2.py index e879b41eb156..979c97e13607 100644 --- a/src/transformers/models/blip_2/processing_blip_2.py +++ b/src/transformers/models/blip_2/processing_blip_2.py @@ -195,3 +195,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["Blip2Processor"] diff --git a/src/transformers/models/bloom/__init__.py b/src/transformers/models/bloom/__init__.py index 3c903b39dca2..1192d53c1058 100644 --- a/src/transformers/models/bloom/__init__.py +++ b/src/transformers/models/bloom/__init__.py @@ -11,91 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_bloom": ["BloomConfig", "BloomOnnxConfig"], -} -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bloom"] = [ - "BloomForCausalLM", - "BloomModel", - "BloomPreTrainedModel", - "BloomForSequenceClassification", - "BloomForTokenClassification", - "BloomForQuestionAnswering", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_bloom"] = [ - "FlaxBloomForCausalLM", - "FlaxBloomModel", - "FlaxBloomPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bloom import BloomConfig, BloomOnnxConfig - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_bloom_fast import BloomTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bloom import ( - BloomForCausalLM, - BloomForQuestionAnswering, - BloomForSequenceClassification, - BloomForTokenClassification, - BloomModel, - BloomPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_bloom import FlaxBloomForCausalLM, FlaxBloomModel, FlaxBloomPreTrainedModel + from .configuration_bloom import * + from .modeling_bloom import * + from .modeling_flax_bloom import * + from .tokenization_bloom_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = 
_LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index dc9f6d3082ec..601091c4d76a 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -21,7 +21,7 @@ if TYPE_CHECKING: - from ... import PreTrainedTokenizer, TensorType + from ...tokenization_utils import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec @@ -232,3 +232,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["BloomConfig", "BloomOnnxConfig"] diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index c4ae776959c7..d29453cab687 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -1340,3 +1340,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "BloomPreTrainedModel", + "BloomModel", + "BloomForCausalLM", + "BloomForSequenceClassification", + "BloomForTokenClassification", + "BloomForQuestionAnswering", +] diff --git a/src/transformers/models/bloom/modeling_flax_bloom.py b/src/transformers/models/bloom/modeling_flax_bloom.py index 187230f35ab9..3aa819382387 100644 --- a/src/transformers/models/bloom/modeling_flax_bloom.py +++ b/src/transformers/models/bloom/modeling_flax_bloom.py @@ -732,3 +732,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_call_sample_docstring(FlaxBloomForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) + +__all__ = ["FlaxBloomPreTrainedModel", "FlaxBloomModel", "FlaxBloomForCausalLM"] diff --git a/src/transformers/models/bloom/tokenization_bloom_fast.py b/src/transformers/models/bloom/tokenization_bloom_fast.py index 54e637735308..b893ec06fcf7 100644 --- a/src/transformers/models/bloom/tokenization_bloom_fast.py +++ b/src/transformers/models/bloom/tokenization_bloom_fast.py @@ -147,3 +147,6 @@ def _encode_plus(self, *args, **kwargs) -> BatchEncoding: def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["BloomTokenizerFast"] diff --git a/src/transformers/models/bridgetower/__init__.py b/src/transformers/models/bridgetower/__init__.py index 3120ca9f2a16..1029fa491e6b 100644 --- a/src/transformers/models/bridgetower/__init__.py +++ b/src/transformers/models/bridgetower/__init__.py @@ -13,73 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_bridgetower": [ - "BridgeTowerConfig", - "BridgeTowerTextConfig", - "BridgeTowerVisionConfig", - ], - "processing_bridgetower": ["BridgeTowerProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bridgetower"] = [ - "BridgeTowerForContrastiveLearning", - "BridgeTowerForImageAndTextRetrieval", - "BridgeTowerForMaskedLM", - "BridgeTowerModel", - "BridgeTowerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bridgetower import ( - BridgeTowerConfig, - BridgeTowerTextConfig, - BridgeTowerVisionConfig, - ) - from .processing_bridgetower import BridgeTowerProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_bridgetower import BridgeTowerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bridgetower import ( - BridgeTowerForContrastiveLearning, - BridgeTowerForImageAndTextRetrieval, - BridgeTowerForMaskedLM, - BridgeTowerModel, - BridgeTowerPreTrainedModel, - ) - - + from .configuration_bridgetower import * + from .image_processing_bridgetower import * + from .modeling_bridgetower import * + from .processing_bridgetower import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bridgetower/configuration_bridgetower.py b/src/transformers/models/bridgetower/configuration_bridgetower.py index 4985b6ef89fe..bf95ed42966d 100644 --- a/src/transformers/models/bridgetower/configuration_bridgetower.py +++ b/src/transformers/models/bridgetower/configuration_bridgetower.py @@ -344,3 +344,6 @@ def from_text_vision_configs( """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["BridgeTowerVisionConfig", "BridgeTowerTextConfig", "BridgeTowerConfig"] diff --git a/src/transformers/models/bridgetower/image_processing_bridgetower.py b/src/transformers/models/bridgetower/image_processing_bridgetower.py index 7272093715f8..2b0ae0a7b9f0 100644 --- a/src/transformers/models/bridgetower/image_processing_bridgetower.py +++ b/src/transformers/models/bridgetower/image_processing_bridgetower.py @@ -35,6 +35,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -121,6 +122,7 @@ def get_resize_output_image_size( return new_height, new_width +@export(backends=("vision",)) class BridgeTowerImageProcessor(BaseImageProcessor): r""" Constructs a BridgeTower image processor. 
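
Note on the hunk above: the @export(backends=("vision",)) marker is what replaces the is_vision_available() try/except gating deleted from the __init__.py files throughout this patch. Below is a minimal sketch of the pattern, assuming export (from transformers.utils.import_utils, extended earlier in this patch) only records the declared backends on the decorated object so that define_import_structure() can pick it up; FooImageProcessor is a hypothetical name used purely for illustration.

    from transformers.utils.import_utils import export

    # Hypothetical module image_processing_foo.py (sketch, not part of this patch).
    # The decorator tags the class with its required optional backend, mirroring
    # BridgeTowerImageProcessor above; nothing else about the class changes.
    @export(backends=("vision",))
    class FooImageProcessor:
        def preprocess(self, images):
            return images

    # The name is still listed in __all__ so the scanned import structure finds it.
    __all__ = ["FooImageProcessor"]
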
@@ -538,3 +540,6 @@ def preprocess( encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs + + +__all__ = ["BridgeTowerImageProcessor"] diff --git a/src/transformers/models/bridgetower/modeling_bridgetower.py b/src/transformers/models/bridgetower/modeling_bridgetower.py index 81785e147db9..fa02b6bbb15d 100644 --- a/src/transformers/models/bridgetower/modeling_bridgetower.py +++ b/src/transformers/models/bridgetower/modeling_bridgetower.py @@ -1905,3 +1905,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "BridgeTowerPreTrainedModel", + "BridgeTowerModel", + "BridgeTowerForMaskedLM", + "BridgeTowerForImageAndTextRetrieval", + "BridgeTowerForContrastiveLearning", +] diff --git a/src/transformers/models/bridgetower/processing_bridgetower.py b/src/transformers/models/bridgetower/processing_bridgetower.py index 7718c3bf833f..087518e456dd 100644 --- a/src/transformers/models/bridgetower/processing_bridgetower.py +++ b/src/transformers/models/bridgetower/processing_bridgetower.py @@ -117,3 +117,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["BridgeTowerProcessor"] diff --git a/src/transformers/models/bros/__init__.py b/src/transformers/models/bros/__init__.py index 516c6349cd12..f68dccdeb164 100644 --- a/src/transformers/models/bros/__init__.py +++ b/src/transformers/models/bros/__init__.py @@ -13,63 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_bros": ["BrosConfig"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["processing_bros"] = ["BrosProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bros"] = [ - "BrosPreTrainedModel", - "BrosModel", - "BrosForTokenClassification", - "BrosSpadeEEForTokenClassification", - "BrosSpadeELForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_bros import BrosConfig - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .processing_bros import BrosProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bros import ( - BrosForTokenClassification, - BrosModel, - BrosPreTrainedModel, - BrosSpadeEEForTokenClassification, - BrosSpadeELForTokenClassification, - ) - - + from .configuration_bros import * + from .modeling_bros import * + from .processing_bros import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/bros/configuration_bros.py 
b/src/transformers/models/bros/configuration_bros.py index 8c2a3cc73a55..84c9989f309f 100644 --- a/src/transformers/models/bros/configuration_bros.py +++ b/src/transformers/models/bros/configuration_bros.py @@ -133,3 +133,6 @@ def __init__( self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox self.dim_bbox_projection = self.hidden_size // self.num_attention_heads self.classifier_dropout_prob = classifier_dropout_prob + + +__all__ = ["BrosConfig"] diff --git a/src/transformers/models/bros/modeling_bros.py b/src/transformers/models/bros/modeling_bros.py index c062278309b7..0e1e86c0b39f 100755 --- a/src/transformers/models/bros/modeling_bros.py +++ b/src/transformers/models/bros/modeling_bros.py @@ -1312,3 +1312,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "BrosPreTrainedModel", + "BrosModel", + "BrosForTokenClassification", + "BrosSpadeEEForTokenClassification", + "BrosSpadeELForTokenClassification", +] diff --git a/src/transformers/models/bros/processing_bros.py b/src/transformers/models/bros/processing_bros.py index 9c2e0642d8cd..4687e7f8a86a 100644 --- a/src/transformers/models/bros/processing_bros.py +++ b/src/transformers/models/bros/processing_bros.py @@ -107,3 +107,6 @@ def decode(self, *args, **kwargs): def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return list(dict.fromkeys(tokenizer_input_names)) + + +__all__ = ["BrosProcessor"] diff --git a/src/transformers/models/byt5/__init__.py b/src/transformers/models/byt5/__init__.py index 662a427383ff..c5eb3a149e8c 100644 --- a/src/transformers/models/byt5/__init__.py +++ b/src/transformers/models/byt5/__init__.py @@ -11,18 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_byt5 import ByT5Tokenizer + from .tokenization_byt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/byt5/tokenization_byt5.py b/src/transformers/models/byt5/tokenization_byt5.py index 21513ab4cd3c..b39ba254b381 100644 --- a/src/transformers/models/byt5/tokenization_byt5.py +++ b/src/transformers/models/byt5/tokenization_byt5.py @@ -231,3 +231,6 @@ def convert_tokens_to_string(self, tokens): # ByT5Tokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return () + + +__all__ = ["ByT5Tokenizer"] diff --git a/src/transformers/models/camembert/__init__.py b/src/transformers/models/camembert/__init__.py index 1759762f47f1..48c41208522d 100644 --- a/src/transformers/models/camembert/__init__.py +++ b/src/transformers/models/camembert/__init__.py @@ -11,128 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_camembert": ["CamembertConfig", "CamembertOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_camembert"] = ["CamembertTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_camembert_fast"] = ["CamembertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_camembert"] = [ - "CamembertForCausalLM", - "CamembertForMaskedLM", - "CamembertForMultipleChoice", - "CamembertForQuestionAnswering", - "CamembertForSequenceClassification", - "CamembertForTokenClassification", - "CamembertModel", - "CamembertPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_camembert"] = [ - "TFCamembertForCausalLM", - "TFCamembertForMaskedLM", - "TFCamembertForMultipleChoice", - "TFCamembertForQuestionAnswering", - "TFCamembertForSequenceClassification", - "TFCamembertForTokenClassification", - "TFCamembertModel", - "TFCamembertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_camembert import CamembertConfig, CamembertOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_camembert import CamembertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_camembert_fast import CamembertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_camembert import ( - CamembertForCausalLM, - CamembertForMaskedLM, - CamembertForMultipleChoice, - CamembertForQuestionAnswering, - CamembertForSequenceClassification, - CamembertForTokenClassification, - CamembertModel, - CamembertPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_camembert import ( - TFCamembertForCausalLM, - TFCamembertForMaskedLM, - TFCamembertForMultipleChoice, - TFCamembertForQuestionAnswering, - TFCamembertForSequenceClassification, - TFCamembertForTokenClassification, - TFCamembertModel, - TFCamembertPreTrainedModel, - ) - + from .configuration_camembert import * + from .modeling_camembert import * + from .modeling_tf_camembert import * + from .tokenization_camembert import * + from .tokenization_camembert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/camembert/configuration_camembert.py b/src/transformers/models/camembert/configuration_camembert.py index b5738012008a..eaf8c94b8914 100644 --- a/src/transformers/models/camembert/configuration_camembert.py +++ b/src/transformers/models/camembert/configuration_camembert.py @@ -150,3 +150,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["CamembertConfig", "CamembertOnnxConfig"] diff --git a/src/transformers/models/camembert/modeling_camembert.py b/src/transformers/models/camembert/modeling_camembert.py index 03a60a2856be..b4435c23475f 100644 --- a/src/transformers/models/camembert/modeling_camembert.py +++ b/src/transformers/models/camembert/modeling_camembert.py @@ -1718,3 +1718,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "CamembertPreTrainedModel", + "CamembertModel", + "CamembertForMaskedLM", + "CamembertForSequenceClassification", + "CamembertForMultipleChoice", + "CamembertForTokenClassification", + "CamembertForQuestionAnswering", + "CamembertForCausalLM", +] diff --git a/src/transformers/models/camembert/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py index f5ddc2242b68..4df266247f0c 100644 --- a/src/transformers/models/camembert/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -1787,3 +1787,16 @@ def build(self, input_shape=None): if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) + + +__all__ = [ + "TFCamembertPreTrainedModel", + "TFCamembertModel", + "TFCamembertForMaskedLM", + "TFCamembertForSequenceClassification", + "TFCamembertForTokenClassification", + "TFCamembertForMultipleChoice", + "TFCamembertForQuestionAnswering", + "TFCamembertForCausalLM", + "TFCamembertMainLayer", +] diff --git a/src/transformers/models/camembert/tokenization_camembert.py b/src/transformers/models/camembert/tokenization_camembert.py index 113fe1b121e2..7d5934c62d88 100644 --- a/src/transformers/models/camembert/tokenization_camembert.py +++ b/src/transformers/models/camembert/tokenization_camembert.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -32,6 +33,7 @@ SPIECE_UNDERLINE = "▁" +@export(backends=("sentencepiece",)) class CamembertTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Construct a CamemBERT tokenizer. 
Based on @@ -316,3 +318,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["CamembertTokenizer"] diff --git a/src/transformers/models/camembert/tokenization_camembert_fast.py b/src/transformers/models/camembert/tokenization_camembert_fast.py index ffec8d98e194..c04b56183902 100644 --- a/src/transformers/models/camembert/tokenization_camembert_fast.py +++ b/src/transformers/models/camembert/tokenization_camembert_fast.py @@ -196,3 +196,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["CamembertTokenizerFast"] diff --git a/src/transformers/models/canine/__init__.py b/src/transformers/models/canine/__init__.py index 93f103344d47..c5600802bb3c 100644 --- a/src/transformers/models/canine/__init__.py +++ b/src/transformers/models/canine/__init__.py @@ -13,55 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_canine": ["CanineConfig"], - "tokenization_canine": ["CanineTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_canine"] = [ - "CanineForMultipleChoice", - "CanineForQuestionAnswering", - "CanineForSequenceClassification", - "CanineForTokenClassification", - "CanineLayer", - "CanineModel", - "CaninePreTrainedModel", - "load_tf_weights_in_canine", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_canine import CanineConfig - from .tokenization_canine import CanineTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_canine import ( - CanineForMultipleChoice, - CanineForQuestionAnswering, - CanineForSequenceClassification, - CanineForTokenClassification, - CanineLayer, - CanineModel, - CaninePreTrainedModel, - load_tf_weights_in_canine, - ) - - + from .configuration_canine import * + from .modeling_canine import * + from .tokenization_canine import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/canine/configuration_canine.py b/src/transformers/models/canine/configuration_canine.py index 9add399112f2..29e90327d08f 100644 --- a/src/transformers/models/canine/configuration_canine.py +++ b/src/transformers/models/canine/configuration_canine.py @@ -136,3 +136,6 @@ def __init__( self.num_hash_functions = num_hash_functions self.num_hash_buckets = num_hash_buckets self.local_transformer_stride = local_transformer_stride + + +__all__ = ["CanineConfig"] diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index c48559497a2e..4e602bf2a899 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -1639,3 +1639,14 @@ def forward( hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_canine", + "CaninePreTrainedModel", + "CanineModel", + "CanineForSequenceClassification", + "CanineForMultipleChoice", + "CanineForTokenClassification", + "CanineForQuestionAnswering", +] diff --git a/src/transformers/models/canine/tokenization_canine.py b/src/transformers/models/canine/tokenization_canine.py index 024507f77877..fe2734712dca 100644 --- a/src/transformers/models/canine/tokenization_canine.py +++ b/src/transformers/models/canine/tokenization_canine.py @@ -239,3 +239,6 @@ def create_token_type_ids_from_sequences( # CanineTokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None): return () + + +__all__ = ["CanineTokenizer"] diff --git a/src/transformers/models/chinese_clip/__init__.py b/src/transformers/models/chinese_clip/__init__.py index 03c9665ab0d0..4423d2135d28 100644 --- a/src/transformers/models/chinese_clip/__init__.py +++ b/src/transformers/models/chinese_clip/__init__.py @@ -13,72 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_chinese_clip": [ - "ChineseCLIPConfig", - "ChineseCLIPOnnxConfig", - "ChineseCLIPTextConfig", - "ChineseCLIPVisionConfig", - ], - "processing_chinese_clip": ["ChineseCLIPProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"] - _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_chinese_clip"] = [ - "ChineseCLIPModel", - "ChineseCLIPPreTrainedModel", - "ChineseCLIPTextModel", - "ChineseCLIPVisionModel", - ] - if TYPE_CHECKING: - from .configuration_chinese_clip import ( - ChineseCLIPConfig, - ChineseCLIPOnnxConfig, - ChineseCLIPTextConfig, - ChineseCLIPVisionConfig, - ) - from .processing_chinese_clip import ChineseCLIPProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_chinese_clip import ( - ChineseCLIPModel, - ChineseCLIPPreTrainedModel, - ChineseCLIPTextModel, - ChineseCLIPVisionModel, - ) - + from .configuration_chinese_clip import * + from .feature_extraction_chinese_clip import * + from .image_processing_chinese_clip import * + from .modeling_chinese_clip import * + from .processing_chinese_clip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/chinese_clip/configuration_chinese_clip.py b/src/transformers/models/chinese_clip/configuration_chinese_clip.py index 
5b37044fab50..b53b514aaf01 100644 --- a/src/transformers/models/chinese_clip/configuration_chinese_clip.py +++ b/src/transformers/models/chinese_clip/configuration_chinese_clip.py @@ -463,3 +463,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 14 + + +__all__ = ["ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig"] diff --git a/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py b/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py index 09aa4106b718..cfdf85448726 100644 --- a/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py +++ b/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_chinese_clip import ChineseCLIPImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ChineseCLIPFeatureExtractor"] diff --git a/src/transformers/models/chinese_clip/image_processing_chinese_clip.py b/src/transformers/models/chinese_clip/image_processing_chinese_clip.py index 515c2de0cfc3..4c2331da07b9 100644 --- a/src/transformers/models/chinese_clip/image_processing_chinese_clip.py +++ b/src/transformers/models/chinese_clip/image_processing_chinese_clip.py @@ -39,6 +39,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -48,6 +49,7 @@ import PIL +@export(backends=("vision",)) class ChineseCLIPImageProcessor(BaseImageProcessor): r""" Constructs a Chinese-CLIP image processor. @@ -304,3 +306,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["ChineseCLIPImageProcessor"] diff --git a/src/transformers/models/chinese_clip/modeling_chinese_clip.py b/src/transformers/models/chinese_clip/modeling_chinese_clip.py index 6fbd9459f5ad..6eee037ec06c 100644 --- a/src/transformers/models/chinese_clip/modeling_chinese_clip.py +++ b/src/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -1565,3 +1565,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", "ChineseCLIPModel"] diff --git a/src/transformers/models/chinese_clip/processing_chinese_clip.py b/src/transformers/models/chinese_clip/processing_chinese_clip.py index 1f44fc50aed5..4a188542dbad 100644 --- a/src/transformers/models/chinese_clip/processing_chinese_clip.py +++ b/src/transformers/models/chinese_clip/processing_chinese_clip.py @@ -139,3 +139,6 @@ def feature_extractor_class(self): FutureWarning, ) return self.image_processor_class + + +__all__ = ["ChineseCLIPProcessor"] diff --git a/src/transformers/models/clap/__init__.py b/src/transformers/models/clap/__init__.py index 4d3d3ba04e13..d27f8613953f 100644 --- a/src/transformers/models/clap/__init__.py +++ b/src/transformers/models/clap/__init__.py @@ -13,60 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_clap": [ - "ClapAudioConfig", - "ClapConfig", - "ClapTextConfig", - ], - "processing_clap": ["ClapProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_clap"] = [ - "ClapModel", - "ClapPreTrainedModel", - "ClapTextModel", - "ClapTextModelWithProjection", - "ClapAudioModel", - "ClapAudioModelWithProjection", - ] - _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"] - if TYPE_CHECKING: - from .configuration_clap import ( - ClapAudioConfig, - ClapConfig, - ClapTextConfig, - ) - from .processing_clap import ClapProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_clap import ClapFeatureExtractor - from .modeling_clap import ( - ClapAudioModel, - ClapAudioModelWithProjection, - ClapModel, - ClapPreTrainedModel, - ClapTextModel, - ClapTextModelWithProjection, - ) - - + from .configuration_clap import * + from .feature_extraction_clap import * + from .modeling_clap import * + from .processing_clap import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/clap/configuration_clap.py b/src/transformers/models/clap/configuration_clap.py index 1425e2a86289..442733f93165 100644 --- a/src/transformers/models/clap/configuration_clap.py +++ b/src/transformers/models/clap/configuration_clap.py @@ -425,3 +425,6 @@ def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: Clap """ return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs) + + +__all__ = ["ClapTextConfig", "ClapAudioConfig", "ClapConfig"] diff --git a/src/transformers/models/clap/feature_extraction_clap.py b/src/transformers/models/clap/feature_extraction_clap.py index 2d1f16e19442..42d3646065ec 100644 --- a/src/transformers/models/clap/feature_extraction_clap.py +++ b/src/transformers/models/clap/feature_extraction_clap.py @@ -360,3 +360,6 @@ def __call__( input_features = input_features.convert_to_tensors(return_tensors) return input_features + + +__all__ = ["ClapFeatureExtractor"] diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py index d0224e3caa5b..002812999f27 100644 --- a/src/transformers/models/clap/modeling_clap.py +++ b/src/transformers/models/clap/modeling_clap.py @@ -2301,3 +2301,13 @@ def forward( attentions=audio_outputs.attentions, hidden_states=audio_outputs.hidden_states, ) + + +__all__ = [ + "ClapPreTrainedModel", + "ClapAudioModel", + "ClapTextModel", + "ClapModel", + "ClapTextModelWithProjection", + "ClapAudioModelWithProjection", +] diff --git a/src/transformers/models/clap/processing_clap.py b/src/transformers/models/clap/processing_clap.py index 4d1739ecf261..6df9d4aa3961 100644 --- a/src/transformers/models/clap/processing_clap.py +++ b/src/transformers/models/clap/processing_clap.py @@ -115,3 +115,6 @@ def model_input_names(self): 
tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names)) + + +__all__ = ["ClapProcessor"] diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py index 36247e943eca..f8aa65235972 100644 --- a/src/transformers/models/clip/__init__.py +++ b/src/transformers/models/clip/__init__.py @@ -13,165 +13,22 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_clip": [ - "CLIPConfig", - "CLIPOnnxConfig", - "CLIPTextConfig", - "CLIPVisionConfig", - ], - "processing_clip": ["CLIPProcessor"], - "tokenization_clip": ["CLIPTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"] - _import_structure["image_processing_clip"] = ["CLIPImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_clip"] = [ - "CLIPModel", - "CLIPPreTrainedModel", - "CLIPTextModel", - "CLIPTextModelWithProjection", - "CLIPVisionModel", - "CLIPVisionModelWithProjection", - "CLIPForImageClassification", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_clip"] = [ - "TFCLIPModel", - "TFCLIPPreTrainedModel", - "TFCLIPTextModel", - "TFCLIPVisionModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_clip"] = [ - "FlaxCLIPModel", - "FlaxCLIPPreTrainedModel", - "FlaxCLIPTextModel", - "FlaxCLIPTextPreTrainedModel", - "FlaxCLIPTextModelWithProjection", - "FlaxCLIPVisionModel", - "FlaxCLIPVisionPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_clip import ( - CLIPConfig, - CLIPOnnxConfig, - CLIPTextConfig, - CLIPVisionConfig, - ) - from .processing_clip import CLIPProcessor - from .tokenization_clip import CLIPTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_clip_fast import CLIPTokenizerFast - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_clip import CLIPFeatureExtractor - from .image_processing_clip import CLIPImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_clip import ( - CLIPForImageClassification, - CLIPModel, - CLIPPreTrainedModel, - CLIPTextModel, - CLIPTextModelWithProjection, - CLIPVisionModel, - 
CLIPVisionModelWithProjection, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_clip import ( - TFCLIPModel, - TFCLIPPreTrainedModel, - TFCLIPTextModel, - TFCLIPVisionModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_clip import ( - FlaxCLIPModel, - FlaxCLIPPreTrainedModel, - FlaxCLIPTextModel, - FlaxCLIPTextModelWithProjection, - FlaxCLIPTextPreTrainedModel, - FlaxCLIPVisionModel, - FlaxCLIPVisionPreTrainedModel, - ) - - + from .configuration_clip import * + from .feature_extraction_clip import * + from .image_processing_clip import * + from .modeling_clip import * + from .modeling_flax_clip import * + from .modeling_tf_clip import * + from .processing_clip import * + from .tokenization_clip import * + from .tokenization_clip_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index 8e027f5c3f01..019b2e2908de 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -451,3 +451,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 14 + + +__all__ = ["CLIPTextConfig", "CLIPVisionConfig", "CLIPConfig", "CLIPOnnxConfig"] diff --git a/src/transformers/models/clip/feature_extraction_clip.py b/src/transformers/models/clip/feature_extraction_clip.py index 5696a63abe62..68d9691e88d1 100644 --- a/src/transformers/models/clip/feature_extraction_clip.py +++ b/src/transformers/models/clip/feature_extraction_clip.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_clip import CLIPImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class CLIPFeatureExtractor(CLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["CLIPFeatureExtractor"] diff --git a/src/transformers/models/clip/image_processing_clip.py b/src/transformers/models/clip/image_processing_clip.py index fa398821ca61..459c06a1d5f2 100644 --- a/src/transformers/models/clip/image_processing_clip.py +++ b/src/transformers/models/clip/image_processing_clip.py @@ -40,6 +40,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -49,6 +50,7 @@ import PIL +@export(backends=("vision",)) class CLIPImageProcessor(BaseImageProcessor): r""" Constructs a CLIP image processor. 
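
Every converted __init__.py in this patch reduces to the same skeleton, consolidated below for reference. Under TYPE_CHECKING, the wildcard imports re-export exactly the names each submodule now declares in __all__; at runtime, define_import_structure(_file) is assumed to rebuild, from those __all__ lists and @export markers, the backend-keyed mapping that the hand-written _import_structure dicts used to provide. Sketch for a hypothetical package models/foo/__init__.py:

    from typing import TYPE_CHECKING

    from ...utils import _LazyModule
    from ...utils.import_utils import define_import_structure

    if TYPE_CHECKING:
        # Static type checkers see the concrete names via each submodule's __all__.
        from .configuration_foo import *
        from .modeling_foo import *
    else:
        import sys

        # At runtime the package is swapped for a _LazyModule that resolves
        # attributes on first access, using the scanned import structure.
        _file = globals()["__file__"]
        sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
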
@@ -343,3 +345,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["CLIPImageProcessor"] diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index 64eb027e9e22..5e55688cda0d 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -1617,3 +1617,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "CLIPPreTrainedModel", + "CLIPTextModel", + "CLIPVisionModel", + "CLIPModel", + "CLIPTextModelWithProjection", + "CLIPVisionModelWithProjection", + "CLIPForImageClassification", +] diff --git a/src/transformers/models/clip/modeling_flax_clip.py b/src/transformers/models/clip/modeling_flax_clip.py index 265e7005b74e..2a5468431eb8 100644 --- a/src/transformers/models/clip/modeling_flax_clip.py +++ b/src/transformers/models/clip/modeling_flax_clip.py @@ -1293,3 +1293,13 @@ class FlaxCLIPModel(FlaxCLIPPreTrainedModel): overwrite_call_docstring(FlaxCLIPModel, CLIP_INPUTS_DOCSTRING + FLAX_CLIP_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxCLIPModel, output_type=FlaxCLIPOutput, config_class=CLIPConfig) + +__all__ = [ + "FlaxCLIPTextPreTrainedModel", + "FlaxCLIPVisionPreTrainedModel", + "FlaxCLIPPreTrainedModel", + "FlaxCLIPTextModel", + "FlaxCLIPTextModelWithProjection", + "FlaxCLIPVisionModel", + "FlaxCLIPModel", +] diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index ca5f4aede218..5542d2615dd9 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -1455,3 +1455,14 @@ def build(self, input_shape=None): if getattr(self, "clip", None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) + + +__all__ = [ + "TFCLIPPreTrainedModel", + "TFCLIPTextModel", + "TFCLIPVisionModel", + "TFCLIPModel", + "TFCLIPTextMainLayer", + "TFCLIPMainLayer", + "TFCLIPVisionMainLayer", +] diff --git a/src/transformers/models/clip/processing_clip.py b/src/transformers/models/clip/processing_clip.py index 60805402b4ce..e69e65dec68d 100644 --- a/src/transformers/models/clip/processing_clip.py +++ b/src/transformers/models/clip/processing_clip.py @@ -151,3 +151,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["CLIPProcessor"] diff --git a/src/transformers/models/clip/tokenization_clip.py b/src/transformers/models/clip/tokenization_clip.py index 83e79890d084..41a73db8c1ec 100644 --- a/src/transformers/models/clip/tokenization_clip.py +++ b/src/transformers/models/clip/tokenization_clip.py @@ -514,3 +514,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return vocab_file, merge_file + + +__all__ = ["CLIPTokenizer"] diff --git a/src/transformers/models/clip/tokenization_clip_fast.py b/src/transformers/models/clip/tokenization_clip_fast.py index 48741a6293e4..89e7c8360310 100644 --- a/src/transformers/models/clip/tokenization_clip_fast.py +++ b/src/transformers/models/clip/tokenization_clip_fast.py @@ -159,3 +159,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["CLIPTokenizerFast"] diff --git a/src/transformers/models/clipseg/__init__.py 
b/src/transformers/models/clipseg/__init__.py index cb7daf11553e..2a436883551b 100644 --- a/src/transformers/models/clipseg/__init__.py +++ b/src/transformers/models/clipseg/__init__.py @@ -13,55 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_clipseg": [ - "CLIPSegConfig", - "CLIPSegTextConfig", - "CLIPSegVisionConfig", - ], - "processing_clipseg": ["CLIPSegProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_clipseg"] = [ - "CLIPSegModel", - "CLIPSegPreTrainedModel", - "CLIPSegTextModel", - "CLIPSegVisionModel", - "CLIPSegForImageSegmentation", - ] - if TYPE_CHECKING: - from .configuration_clipseg import ( - CLIPSegConfig, - CLIPSegTextConfig, - CLIPSegVisionConfig, - ) - from .processing_clipseg import CLIPSegProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_clipseg import ( - CLIPSegForImageSegmentation, - CLIPSegModel, - CLIPSegPreTrainedModel, - CLIPSegTextModel, - CLIPSegVisionModel, - ) - + from .configuration_clipseg import * + from .modeling_clipseg import * + from .processing_clipseg import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/clipseg/configuration_clipseg.py b/src/transformers/models/clipseg/configuration_clipseg.py index 0ac8196fc7f5..77c2062d540d 100644 --- a/src/transformers/models/clipseg/configuration_clipseg.py +++ b/src/transformers/models/clipseg/configuration_clipseg.py @@ -427,3 +427,6 @@ def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["CLIPSegTextConfig", "CLIPSegVisionConfig", "CLIPSegConfig"] diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index a6507e431f68..c1de1db0167a 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -1472,3 +1472,12 @@ def forward( vision_model_output=vision_outputs, decoder_output=decoder_outputs, ) + + +__all__ = [ + "CLIPSegPreTrainedModel", + "CLIPSegTextModel", + "CLIPSegVisionModel", + "CLIPSegModel", + "CLIPSegForImageSegmentation", +] diff --git a/src/transformers/models/clipseg/processing_clipseg.py b/src/transformers/models/clipseg/processing_clipseg.py index f8eaca82334a..bd817ae78655 100644 --- a/src/transformers/models/clipseg/processing_clipseg.py +++ b/src/transformers/models/clipseg/processing_clipseg.py @@ -159,3 +159,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["CLIPSegProcessor"] diff --git a/src/transformers/models/clvp/__init__.py b/src/transformers/models/clvp/__init__.py index 6ef4bc60e321..27bbd7aa90b8 100644 --- a/src/transformers/models/clvp/__init__.py +++ b/src/transformers/models/clvp/__init__.py @@ -13,67 +13,18 @@ # limitations 
under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_clvp": [ - "ClvpConfig", - "ClvpDecoderConfig", - "ClvpEncoderConfig", - ], - "feature_extraction_clvp": ["ClvpFeatureExtractor"], - "processing_clvp": ["ClvpProcessor"], - "tokenization_clvp": ["ClvpTokenizer"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_clvp"] = [ - "ClvpModelForConditionalGeneration", - "ClvpForCausalLM", - "ClvpModel", - "ClvpPreTrainedModel", - "ClvpEncoder", - "ClvpDecoder", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_clvp import ( - ClvpConfig, - ClvpDecoderConfig, - ClvpEncoderConfig, - ) - from .feature_extraction_clvp import ClvpFeatureExtractor - from .processing_clvp import ClvpProcessor - from .tokenization_clvp import ClvpTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_clvp import ( - ClvpDecoder, - ClvpEncoder, - ClvpForCausalLM, - ClvpModel, - ClvpModelForConditionalGeneration, - ClvpPreTrainedModel, - ) - + from .configuration_clvp import * + from .feature_extraction_clvp import * + from .modeling_clvp import * + from .processing_clvp import * + from .tokenization_clvp import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/clvp/configuration_clvp.py b/src/transformers/models/clvp/configuration_clvp.py index d17a04c861bf..a2d1743db161 100644 --- a/src/transformers/models/clvp/configuration_clvp.py +++ b/src/transformers/models/clvp/configuration_clvp.py @@ -450,3 +450,6 @@ def from_sub_model_configs( decoder_config=decoder_config.to_dict(), **kwargs, ) + + +__all__ = ["ClvpEncoderConfig", "ClvpDecoderConfig", "ClvpConfig"] diff --git a/src/transformers/models/clvp/feature_extraction_clvp.py b/src/transformers/models/clvp/feature_extraction_clvp.py index cb85b17a7f17..2dbda430bb25 100644 --- a/src/transformers/models/clvp/feature_extraction_clvp.py +++ b/src/transformers/models/clvp/feature_extraction_clvp.py @@ -236,3 +236,6 @@ def __call__( padded_inputs["input_features"] = input_features return padded_inputs.convert_to_tensors(return_tensors) + + +__all__ = ["ClvpFeatureExtractor"] diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py index 479b0fac2b04..4c07ce2485b8 100644 --- a/src/transformers/models/clvp/modeling_clvp.py +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -2019,3 +2019,13 @@ def generate( text_encoder_hidden_states=text_outputs.hidden_states, speech_encoder_hidden_states=speech_outputs.hidden_states, ) + + +__all__ = [ + "ClvpPreTrainedModel", + "ClvpEncoder", + "ClvpDecoder", + "ClvpModel", + "ClvpForCausalLM", + "ClvpModelForConditionalGeneration", +] diff --git a/src/transformers/models/clvp/processing_clvp.py b/src/transformers/models/clvp/processing_clvp.py index 4e015cea1f84..3f4d54f25903 100644 --- a/src/transformers/models/clvp/processing_clvp.py +++ 
b/src/transformers/models/clvp/processing_clvp.py @@ -88,3 +88,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["ClvpProcessor"] diff --git a/src/transformers/models/clvp/tokenization_clvp.py b/src/transformers/models/clvp/tokenization_clvp.py index d77564f718a5..85ae1d6991eb 100644 --- a/src/transformers/models/clvp/tokenization_clvp.py +++ b/src/transformers/models/clvp/tokenization_clvp.py @@ -362,3 +362,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return vocab_file, merge_file + + +__all__ = ["ClvpTokenizer"] diff --git a/src/transformers/models/code_llama/__init__.py b/src/transformers/models/code_llama/__init__.py index 8c99c023419b..d6098d287ec5 100644 --- a/src/transformers/models/code_llama/__init__.py +++ b/src/transformers/models/code_llama/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_code_llama"] = ["CodeLlamaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_code_llama_fast"] = ["CodeLlamaTokenizerFast"] - if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_code_llama import CodeLlamaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_code_llama_fast import CodeLlamaTokenizerFast - + from .tokenization_code_llama import * + from .tokenization_code_llama_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/code_llama/tokenization_code_llama.py b/src/transformers/models/code_llama/tokenization_code_llama.py index cc906687874c..427c147b39ae 100644 --- a/src/transformers/models/code_llama/tokenization_code_llama.py +++ b/src/transformers/models/code_llama/tokenization_code_llama.py @@ -25,6 +25,7 @@ from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging, requires_backends +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -46,6 +47,7 @@ # fmt: on +@export(backends=("sentencepiece",)) class CodeLlamaTokenizer(PreTrainedTokenizer): """ Construct a CodeLlama tokenizer. Based on byte-level Byte-Pair-Encoding. 
The default padding token is unset as @@ -447,3 +449,6 @@ def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) + + +__all__ = ["CodeLlamaTokenizer"] diff --git a/src/transformers/models/code_llama/tokenization_code_llama_fast.py b/src/transformers/models/code_llama/tokenization_code_llama_fast.py index b832348d07af..3bc831cdd6a1 100644 --- a/src/transformers/models/code_llama/tokenization_code_llama_fast.py +++ b/src/transformers/models/code_llama/tokenization_code_llama_fast.py @@ -376,3 +376,6 @@ def build_inputs_with_special_tokens( if token_ids_1 is None: return self.bos_token_id + token_ids_0 + self.eos_token_id return self.bos_token_id + token_ids_0 + token_ids_1 + self.eos_token_id + + +__all__ = ["CodeLlamaTokenizerFast"] diff --git a/src/transformers/models/codegen/__init__.py b/src/transformers/models/codegen/__init__.py index 7d4cb05adb20..9f0a3452b012 100644 --- a/src/transformers/models/codegen/__init__.py +++ b/src/transformers/models/codegen/__init__.py @@ -13,59 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_codegen": ["CodeGenConfig", "CodeGenOnnxConfig"], - "tokenization_codegen": ["CodeGenTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_codegen"] = [ - "CodeGenForCausalLM", - "CodeGenModel", - "CodeGenPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_codegen import CodeGenConfig, CodeGenOnnxConfig - from .tokenization_codegen import CodeGenTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_codegen_fast import CodeGenTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_codegen import ( - CodeGenForCausalLM, - CodeGenModel, - CodeGenPreTrainedModel, - ) - + from .configuration_codegen import * + from .modeling_codegen import * + from .tokenization_codegen import * + from .tokenization_codegen_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/codegen/configuration_codegen.py b/src/transformers/models/codegen/configuration_codegen.py index cf69001480c5..9d9fba8ac2d9 100644 --- a/src/transformers/models/codegen/configuration_codegen.py +++ b/src/transformers/models/codegen/configuration_codegen.py @@ -17,10 +17,11 @@ from collections import OrderedDict from typing import Any, List, Mapping, Optional -from ... 
import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec +from ...tokenization_utils import PreTrainedTokenizer, TensorType from ...utils import logging +from ...utils.import_utils import is_torch_available logger = logging.get_logger(__name__) @@ -225,3 +226,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["CodeGenConfig", "CodeGenOnnxConfig"] diff --git a/src/transformers/models/codegen/modeling_codegen.py b/src/transformers/models/codegen/modeling_codegen.py index 46eea43e1285..829958650734 100644 --- a/src/transformers/models/codegen/modeling_codegen.py +++ b/src/transformers/models/codegen/modeling_codegen.py @@ -880,3 +880,6 @@ def _reorder_cache( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) for layer_past in past_key_values ) + + +__all__ = ["CodeGenPreTrainedModel", "CodeGenModel", "CodeGenForCausalLM"] diff --git a/src/transformers/models/codegen/tokenization_codegen.py b/src/transformers/models/codegen/tokenization_codegen.py index f3f765d273a3..2b584e83b1b9 100644 --- a/src/transformers/models/codegen/tokenization_codegen.py +++ b/src/transformers/models/codegen/tokenization_codegen.py @@ -414,3 +414,6 @@ def find_re(string, pattern, start_pos): return completion[: min(terminals_pos)] else: return completion + + +__all__ = ["CodeGenTokenizer"] diff --git a/src/transformers/models/codegen/tokenization_codegen_fast.py b/src/transformers/models/codegen/tokenization_codegen_fast.py index 9fdf2ec38ed3..fcfe1d2795b4 100644 --- a/src/transformers/models/codegen/tokenization_codegen_fast.py +++ b/src/transformers/models/codegen/tokenization_codegen_fast.py @@ -270,3 +270,6 @@ def find_re(string, pattern, start_pos): return completion[: min(terminals_pos)] else: return completion + + +__all__ = ["CodeGenTokenizerFast"] diff --git a/src/transformers/models/cohere/__init__.py b/src/transformers/models/cohere/__init__.py index f92e8b68a50a..8a422f0138ed 100644 --- a/src/transformers/models/cohere/__init__.py +++ b/src/transformers/models/cohere/__init__.py @@ -13,65 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_cohere": ["CohereConfig"], -} - - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_cohere_fast"] = ["CohereTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_cohere"] = [ - "CohereForCausalLM", - "CohereModel", - "CoherePreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_cohere import CohereConfig - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_cohere_fast import CohereTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_cohere import ( - CohereForCausalLM, - CohereModel, - CoherePreTrainedModel, - ) - + from .configuration_cohere import * + from .modeling_cohere import * + from .tokenization_cohere_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/cohere/configuration_cohere.py b/src/transformers/models/cohere/configuration_cohere.py index 73973bfad60b..864fe972d92b 100644 --- a/src/transformers/models/cohere/configuration_cohere.py +++ b/src/transformers/models/cohere/configuration_cohere.py @@ -155,3 +155,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["CohereConfig"] diff --git a/src/transformers/models/cohere/modeling_cohere.py b/src/transformers/models/cohere/modeling_cohere.py index 4010d9ec3a43..278dc8018743 100644 --- a/src/transformers/models/cohere/modeling_cohere.py +++ b/src/transformers/models/cohere/modeling_cohere.py @@ -1183,3 +1183,6 @@ def prepare_inputs_for_generation( } ) return model_inputs + + +__all__ = ["CoherePreTrainedModel", "CohereModel", "CohereForCausalLM"] diff --git a/src/transformers/models/cohere/tokenization_cohere_fast.py b/src/transformers/models/cohere/tokenization_cohere_fast.py index bac665b473c5..e99df5c609c8 100644 --- a/src/transformers/models/cohere/tokenization_cohere_fast.py +++ b/src/transformers/models/cohere/tokenization_cohere_fast.py @@ -510,3 +510,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = output + bos_token_id + token_ids_1 + eos_token_id return output + + +__all__ = ["CohereTokenizerFast"] diff --git a/src/transformers/models/conditional_detr/__init__.py b/src/transformers/models/conditional_detr/__init__.py index c7d5c5261d6e..3fc16ccb5dc0 100644 --- a/src/transformers/models/conditional_detr/__init__.py +++ b/src/transformers/models/conditional_detr/__init__.py @@ -11,71 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_conditional_detr": [ - "ConditionalDetrConfig", - "ConditionalDetrOnnxConfig", - ] -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"] - _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_conditional_detr"] = [ - "ConditionalDetrForObjectDetection", - "ConditionalDetrForSegmentation", - "ConditionalDetrModel", - "ConditionalDetrPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_conditional_detr import ( - ConditionalDetrConfig, - ConditionalDetrOnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor - from .image_processing_conditional_detr import ConditionalDetrImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_conditional_detr import ( - ConditionalDetrForObjectDetection, - ConditionalDetrForSegmentation, - ConditionalDetrModel, - ConditionalDetrPreTrainedModel, - ) - + from .configuration_conditional_detr import * + from .feature_extraction_conditional_detr import * + from .image_processing_conditional_detr import * + from .modeling_conditional_detr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py index 64364c653dd9..8dae72edff08 100644 --- a/src/transformers/models/conditional_detr/configuration_conditional_detr.py +++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py @@ -273,3 +273,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["ConditionalDetrConfig", "ConditionalDetrOnnxConfig"] diff --git a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py index bfdec373f865..6a3e3206fe26 100644 --- a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py +++ b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py @@ -18,6 +18,7 @@ from ...image_transforms import rgb_to_id as _rgb_to_id from ...utils import logging +from ...utils.import_utils import export from .image_processing_conditional_detr import ConditionalDetrImageProcessor @@ -33,6 +34,7 @@ def rgb_to_id(x): return _rgb_to_id(x) +@export(backends=("vision",)) class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor): def 
__init__(self, *args, **kwargs) -> None: warnings.warn( @@ -41,3 +43,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ConditionalDetrFeatureExtractor"] diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py index c7bc27207bd3..062c4b3952cc 100644 --- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py +++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py @@ -64,6 +64,7 @@ is_vision_available, logging, ) +from ...utils.import_utils import export if is_torch_available(): @@ -801,6 +802,7 @@ def compute_segments( return segmentation, segments +@export(backends=("vision",)) class ConditionalDetrImageProcessor(BaseImageProcessor): r""" Constructs a Conditional Detr image processor. @@ -1851,3 +1853,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["ConditionalDetrImageProcessor"] diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index e0dcca67aefb..ca294adfb1ce 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -2633,3 +2633,11 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask) + + +__all__ = [ + "ConditionalDetrPreTrainedModel", + "ConditionalDetrModel", + "ConditionalDetrForObjectDetection", + "ConditionalDetrForSegmentation", +] diff --git a/src/transformers/models/convbert/__init__.py b/src/transformers/models/convbert/__init__.py index 15c6bb51767a..86c52dc19d0c 100644 --- a/src/transformers/models/convbert/__init__.py +++ b/src/transformers/models/convbert/__init__.py @@ -13,114 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_convbert": ["ConvBertConfig", "ConvBertOnnxConfig"], - "tokenization_convbert": ["ConvBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_convbert"] = [ - "ConvBertForMaskedLM", - "ConvBertForMultipleChoice", - "ConvBertForQuestionAnswering", - "ConvBertForSequenceClassification", - "ConvBertForTokenClassification", - "ConvBertLayer", - "ConvBertModel", - "ConvBertPreTrainedModel", - "load_tf_weights_in_convbert", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_convbert"] = [ - "TFConvBertForMaskedLM", - "TFConvBertForMultipleChoice", - "TFConvBertForQuestionAnswering", - "TFConvBertForSequenceClassification", - "TFConvBertForTokenClassification", - "TFConvBertLayer", - "TFConvBertModel", - "TFConvBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_convbert import ConvBertConfig, ConvBertOnnxConfig - from .tokenization_convbert import ConvBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_convbert_fast import ConvBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_convbert import ( - ConvBertForMaskedLM, - ConvBertForMultipleChoice, - ConvBertForQuestionAnswering, - ConvBertForSequenceClassification, - ConvBertForTokenClassification, - ConvBertLayer, - ConvBertModel, - ConvBertPreTrainedModel, - load_tf_weights_in_convbert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_convbert import ( - TFConvBertForMaskedLM, - TFConvBertForMultipleChoice, - TFConvBertForQuestionAnswering, - TFConvBertForSequenceClassification, - TFConvBertForTokenClassification, - TFConvBertLayer, - TFConvBertModel, - TFConvBertPreTrainedModel, - ) - - + from .configuration_convbert import * + from .modeling_convbert import * + from .modeling_tf_convbert import * + from .tokenization_convbert import * + from .tokenization_convbert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/convbert/configuration_convbert.py b/src/transformers/models/convbert/configuration_convbert.py index 2c6b544568b7..558ef5638cd4 100644 --- a/src/transformers/models/convbert/configuration_convbert.py +++ b/src/transformers/models/convbert/configuration_convbert.py @@ -155,3 +155,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: 
("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["ConvBertConfig", "ConvBertOnnxConfig"] diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index b92ff686edec..921b35f8b309 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -1331,3 +1331,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_convbert", + "ConvBertPreTrainedModel", + "ConvBertModel", + "ConvBertForMaskedLM", + "ConvBertForSequenceClassification", + "ConvBertForMultipleChoice", + "ConvBertForTokenClassification", + "ConvBertForQuestionAnswering", +] diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index 95be5a56e195..c01ae4007287 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -1462,3 +1462,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFConvBertPreTrainedModel", + "TFConvBertModel", + "TFConvBertForMaskedLM", + "TFConvBertForSequenceClassification", + "TFConvBertForMultipleChoice", + "TFConvBertForTokenClassification", + "TFConvBertForQuestionAnswering", + "TFConvBertMainLayer", +] diff --git a/src/transformers/models/convbert/tokenization_convbert.py b/src/transformers/models/convbert/tokenization_convbert.py index cc8cb1b9a738..b460fa15c199 100644 --- a/src/transformers/models/convbert/tokenization_convbert.py +++ b/src/transformers/models/convbert/tokenization_convbert.py @@ -502,3 +502,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["ConvBertTokenizer"] diff --git a/src/transformers/models/convbert/tokenization_convbert_fast.py b/src/transformers/models/convbert/tokenization_convbert_fast.py index e9c47c2b04bc..59262d976e97 100644 --- a/src/transformers/models/convbert/tokenization_convbert_fast.py +++ b/src/transformers/models/convbert/tokenization_convbert_fast.py @@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["ConvBertTokenizerFast"] diff --git a/src/transformers/models/convnext/__init__.py b/src/transformers/models/convnext/__init__.py index 4e9a90bd4deb..6c6c549949cd 100644 --- a/src/transformers/models/convnext/__init__.py +++ b/src/transformers/models/convnext/__init__.py @@ -13,86 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_convnext": ["ConvNextConfig", "ConvNextOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"] - _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_convnext"] = [ - "ConvNextForImageClassification", - "ConvNextModel", - "ConvNextPreTrainedModel", - "ConvNextBackbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_convnext"] = [ - "TFConvNextForImageClassification", - "TFConvNextModel", - "TFConvNextPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_convnext import ConvNextConfig, ConvNextOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_convnext import ConvNextFeatureExtractor - from .image_processing_convnext import ConvNextImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_convnext import ( - ConvNextBackbone, - ConvNextForImageClassification, - ConvNextModel, - ConvNextPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel - - + from .configuration_convnext import * + from .feature_extraction_convnext import * + from .image_processing_convnext import * + from .modeling_convnext import * + from .modeling_tf_convnext import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index b4fe1e60e872..9f9ed3bfd469 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-5 + + +__all__ = ["ConvNextConfig", "ConvNextOnnxConfig"] diff --git a/src/transformers/models/convnext/feature_extraction_convnext.py b/src/transformers/models/convnext/feature_extraction_convnext.py index 92b8a8f4fba8..dd30f28cd604 100644 --- a/src/transformers/models/convnext/feature_extraction_convnext.py +++ b/src/transformers/models/convnext/feature_extraction_convnext.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_convnext import ConvNextImageProcessor logger = 
logging.get_logger(__name__) +@export(backends=("vision",)) class ConvNextFeatureExtractor(ConvNextImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ConvNextFeatureExtractor"] diff --git a/src/transformers/models/convnext/image_processing_convnext.py b/src/transformers/models/convnext/image_processing_convnext.py index aaabc677f182..99eb79b7a335 100644 --- a/src/transformers/models/convnext/image_processing_convnext.py +++ b/src/transformers/models/convnext/image_processing_convnext.py @@ -39,6 +39,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -48,6 +49,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ConvNextImageProcessor(BaseImageProcessor): r""" Constructs a ConvNeXT image processor. @@ -318,3 +320,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["ConvNextImageProcessor"] diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index a0deaf96d5d1..f26910705d22 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -546,3 +546,6 @@ def forward( hidden_states=hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["ConvNextPreTrainedModel", "ConvNextModel", "ConvNextForImageClassification", "ConvNextBackbone"] diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py index 0e348a838a9a..6400e3616775 100644 --- a/src/transformers/models/convnext/modeling_tf_convnext.py +++ b/src/transformers/models/convnext/modeling_tf_convnext.py @@ -664,3 +664,6 @@ def build(self, input_shape=None): if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_sizes[-1]]) + + +__all__ = ["TFConvNextPreTrainedModel", "TFConvNextModel", "TFConvNextForImageClassification", "TFConvNextMainLayer"] diff --git a/src/transformers/models/convnextv2/__init__.py b/src/transformers/models/convnextv2/__init__.py index 5505868c14a4..ba3796050742 100644 --- a/src/transformers/models/convnextv2/__init__.py +++ b/src/transformers/models/convnextv2/__init__.py @@ -1,89 +1,18 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_tf_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_convnextv2": ["ConvNextV2Config"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_convnextv2"] = [ - "ConvNextV2ForImageClassification", - "ConvNextV2Model", - "ConvNextV2PreTrainedModel", - "ConvNextV2Backbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_convnextv2"] = [ - "TFConvNextV2ForImageClassification", - "TFConvNextV2Model", - "TFConvNextV2PreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_convnextv2 import ( - ConvNextV2Config, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_convnextv2 import ( - ConvNextV2Backbone, - ConvNextV2ForImageClassification, - ConvNextV2Model, - ConvNextV2PreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_convnextv2 import ( - TFConvNextV2ForImageClassification, - TFConvNextV2Model, - TFConvNextV2PreTrainedModel, - ) - + from .modeling_convnextv2 import * + from .modeling_tf_convnextv2 import * + from .configuration_convnextv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/convnextv2/configuration_convnextv2.py b/src/transformers/models/convnextv2/configuration_convnextv2.py index af239aaef742..d524df82655b 100644 --- a/src/transformers/models/convnextv2/configuration_convnextv2.py +++ b/src/transformers/models/convnextv2/configuration_convnextv2.py @@ -111,3 +111,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["ConvNextV2Config"] diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py index df13a5ea6b6b..280a236da23d 100644 --- a/src/transformers/models/convnextv2/modeling_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_convnextv2.py @@ -569,3 +569,6 @@ def forward( hidden_states=hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["ConvNextV2PreTrainedModel", "ConvNextV2Model", "ConvNextV2ForImageClassification", "ConvNextV2Backbone"] diff --git a/src/transformers/models/convnextv2/modeling_tf_convnextv2.py b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py index d8b141633472..795edd3de40d 100644 --- a/src/transformers/models/convnextv2/modeling_tf_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_tf_convnextv2.py @@ -678,3 +678,11 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, 
self.config.hidden_sizes[-1]]) + + +__all__ = [ + "TFConvNextV2PreTrainedModel", + "TFConvNextV2Model", + "TFConvNextV2ForImageClassification", + "TFConvNextV2MainLayer", +] diff --git a/src/transformers/models/cpm/__init__.py b/src/transformers/models/cpm/__init__.py index be6b0f66898e..079183f5b4b6 100644 --- a/src/transformers/models/cpm/__init__.py +++ b/src/transformers/models/cpm/__init__.py @@ -11,49 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_cpm"] = ["CpmTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_cpm import CpmTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_cpm_fast import CpmTokenizerFast - + from .tokenization_cpm import * + from .tokenization_cpm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/cpm/tokenization_cpm.py b/src/transformers/models/cpm/tokenization_cpm.py index c92afb7eb6d2..8872b942593c 100644 --- a/src/transformers/models/cpm/tokenization_cpm.py +++ b/src/transformers/models/cpm/tokenization_cpm.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -30,6 +31,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} +@export(backends=("sentencepiece",)) class CpmTokenizer(PreTrainedTokenizer): """Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models.""" @@ -343,3 +345,6 @@ def _decode(self, *args, **kwargs): text = super()._decode(*args, **kwargs) text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n") return text + + +__all__ = ["CpmTokenizer"] diff --git a/src/transformers/models/cpm/tokenization_cpm_fast.py b/src/transformers/models/cpm/tokenization_cpm_fast.py index 3dcf624843c5..ef933e084ddb 100644 --- a/src/transformers/models/cpm/tokenization_cpm_fast.py +++ b/src/transformers/models/cpm/tokenization_cpm_fast.py @@ -236,3 +236,6 @@ def _decode(self, *args, **kwargs): text = super()._decode(*args, **kwargs) text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n") return text + + +__all__ = ["CpmTokenizerFast"] diff --git a/src/transformers/models/cpmant/__init__.py b/src/transformers/models/cpmant/__init__.py index 61db942a4f66..b9b5b28e8d9f 100644 --- a/src/transformers/models/cpmant/__init__.py +++ b/src/transformers/models/cpmant/__init__.py @@ -1,62 +1,18 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_cpmant": ["CpmAntConfig"], - "tokenization_cpmant": ["CpmAntTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_cpmant"] = [ - "CpmAntForCausalLM", - "CpmAntModel", - "CpmAntPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_cpmant import CpmAntConfig - from .tokenization_cpmant import CpmAntTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_cpmant import ( - CpmAntForCausalLM, - CpmAntModel, - CpmAntPreTrainedModel, - ) - - + from .tokenization_cpmant import * + from .modeling_cpmant import * + from .configuration_cpmant import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/cpmant/configuration_cpmant.py b/src/transformers/models/cpmant/configuration_cpmant.py index 155811913a95..c3368d67af7a 100644 --- a/src/transformers/models/cpmant/configuration_cpmant.py +++ b/src/transformers/models/cpmant/configuration_cpmant.py @@ -117,3 +117,6 @@ def __init__( self.use_cache = use_cache self.vocab_size = vocab_size self.init_std = init_std + + +__all__ = ["CpmAntConfig"] diff --git a/src/transformers/models/cpmant/modeling_cpmant.py b/src/transformers/models/cpmant/modeling_cpmant.py index c8a313505251..6d514b1820f5 100755 --- a/src/transformers/models/cpmant/modeling_cpmant.py +++ b/src/transformers/models/cpmant/modeling_cpmant.py @@ -866,3 +866,6 @@ def _reorder_cache(self, past_key_values, beam_idx): key_value_layer[0] = key_value_layer[0][beam_idx] key_value_layer[1] = key_value_layer[1][beam_idx] return past_key_values + + +__all__ = ["CpmAntPreTrainedModel", "CpmAntModel", "CpmAntForCausalLM"] diff --git a/src/transformers/models/cpmant/tokenization_cpmant.py b/src/transformers/models/cpmant/tokenization_cpmant.py index 094a14ffce06..2da1d6286c5e 100644 --- a/src/transformers/models/cpmant/tokenization_cpmant.py +++ b/src/transformers/models/cpmant/tokenization_cpmant.py @@ -265,3 +265,6 @@ def get_special_tokens_mask( if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) return [1] + ([0] * len(token_ids_0)) + + +__all__ = ["CpmAntTokenizer"] diff --git a/src/transformers/models/ctrl/__init__.py b/src/transformers/models/ctrl/__init__.py index f64cced4e28b..f43ed72fd96a 100644 --- a/src/transformers/models/ctrl/__init__.py +++ b/src/transformers/models/ctrl/__init__.py @@ -11,75 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_ctrl": ["CTRLConfig"], - "tokenization_ctrl": ["CTRLTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_ctrl"] = [ - "CTRLForSequenceClassification", - "CTRLLMHeadModel", - "CTRLModel", - "CTRLPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_ctrl"] = [ - "TFCTRLForSequenceClassification", - "TFCTRLLMHeadModel", - "TFCTRLModel", - "TFCTRLPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_ctrl import CTRLConfig - from .tokenization_ctrl import CTRLTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_ctrl import ( - CTRLForSequenceClassification, - CTRLLMHeadModel, - CTRLModel, - CTRLPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_ctrl import ( - TFCTRLForSequenceClassification, - TFCTRLLMHeadModel, - TFCTRLModel, - TFCTRLPreTrainedModel, - ) - + from .configuration_ctrl import * + from .modeling_ctrl import * + from .modeling_tf_ctrl import * + from .tokenization_ctrl import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/ctrl/configuration_ctrl.py b/src/transformers/models/ctrl/configuration_ctrl.py index adea61cd67fb..7a812f0b5565 100644 --- a/src/transformers/models/ctrl/configuration_ctrl.py +++ b/src/transformers/models/ctrl/configuration_ctrl.py @@ -111,3 +111,6 @@ def __init__( self.use_cache = use_cache super().__init__(**kwargs) + + +__all__ = ["CTRLConfig"] diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index bbf3b10a62ec..967bab58d43c 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -836,3 +836,6 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = ["CTRLPreTrainedModel", "CTRLModel", "CTRLLMHeadModel", "CTRLForSequenceClassification"] diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index 3feecf9a205f..6d1532842939 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -926,3 +926,12 @@ def build(self, input_shape=None): if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) + + +__all__ = [ + "TFCTRLPreTrainedModel", + "TFCTRLModel", + "TFCTRLLMHeadModel", + "TFCTRLForSequenceClassification", + "TFCTRLMainLayer", +] diff --git a/src/transformers/models/ctrl/tokenization_ctrl.py b/src/transformers/models/ctrl/tokenization_ctrl.py index 5305f2b231b8..66dae2b05fa6 
100644 --- a/src/transformers/models/ctrl/tokenization_ctrl.py +++ b/src/transformers/models/ctrl/tokenization_ctrl.py @@ -246,3 +246,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far) + + +__all__ = ["CTRLTokenizer"] diff --git a/src/transformers/models/cvt/__init__.py b/src/transformers/models/cvt/__init__.py index 7018b41d58e8..ac8a4fe9ad2d 100644 --- a/src/transformers/models/cvt/__init__.py +++ b/src/transformers/models/cvt/__init__.py @@ -13,65 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_cvt": ["CvtConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_cvt"] = [ - "CvtForImageClassification", - "CvtModel", - "CvtPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_cvt"] = [ - "TFCvtForImageClassification", - "TFCvtModel", - "TFCvtPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_cvt import CvtConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_cvt import ( - CvtForImageClassification, - CvtModel, - CvtPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_cvt import ( - TFCvtForImageClassification, - TFCvtModel, - TFCvtPreTrainedModel, - ) - - + from .configuration_cvt import * + from .modeling_cvt import * + from .modeling_tf_cvt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/cvt/configuration_cvt.py b/src/transformers/models/cvt/configuration_cvt.py index a966701cee64..38cba6874f68 100644 --- a/src/transformers/models/cvt/configuration_cvt.py +++ b/src/transformers/models/cvt/configuration_cvt.py @@ -141,3 +141,6 @@ def __init__( self.stride_q = stride_q self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps + + +__all__ = ["CvtConfig"] diff --git a/src/transformers/models/cvt/modeling_cvt.py b/src/transformers/models/cvt/modeling_cvt.py index 796382444427..3e0f17790cd6 100644 --- a/src/transformers/models/cvt/modeling_cvt.py +++ b/src/transformers/models/cvt/modeling_cvt.py @@ -720,3 +720,6 @@ def forward( return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + +__all__ = ["CvtPreTrainedModel", "CvtModel", "CvtForImageClassification"] diff --git a/src/transformers/models/cvt/modeling_tf_cvt.py b/src/transformers/models/cvt/modeling_tf_cvt.py index 617fc99733e0..01778877906f 100644 --- 
a/src/transformers/models/cvt/modeling_tf_cvt.py +++ b/src/transformers/models/cvt/modeling_tf_cvt.py @@ -1091,3 +1091,6 @@ def build(self, input_shape=None): if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.embed_dim[-1]]) + + +__all__ = ["TFCvtPreTrainedModel", "TFCvtModel", "TFCvtForImageClassification", "TFCvtMainLayer"] diff --git a/src/transformers/models/data2vec/__init__.py b/src/transformers/models/data2vec/__init__.py index 525068db5983..a81b5f7d5cf7 100644 --- a/src/transformers/models/data2vec/__init__.py +++ b/src/transformers/models/data2vec/__init__.py @@ -11,115 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_data2vec_audio": ["Data2VecAudioConfig"], - "configuration_data2vec_text": [ - "Data2VecTextConfig", - "Data2VecTextOnnxConfig", - ], - "configuration_data2vec_vision": [ - "Data2VecVisionConfig", - "Data2VecVisionOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_data2vec_audio"] = [ - "Data2VecAudioForAudioFrameClassification", - "Data2VecAudioForCTC", - "Data2VecAudioForSequenceClassification", - "Data2VecAudioForXVector", - "Data2VecAudioModel", - "Data2VecAudioPreTrainedModel", - ] - _import_structure["modeling_data2vec_text"] = [ - "Data2VecTextForCausalLM", - "Data2VecTextForMaskedLM", - "Data2VecTextForMultipleChoice", - "Data2VecTextForQuestionAnswering", - "Data2VecTextForSequenceClassification", - "Data2VecTextForTokenClassification", - "Data2VecTextModel", - "Data2VecTextPreTrainedModel", - ] - _import_structure["modeling_data2vec_vision"] = [ - "Data2VecVisionForImageClassification", - "Data2VecVisionForMaskedImageModeling", - "Data2VecVisionForSemanticSegmentation", - "Data2VecVisionModel", - "Data2VecVisionPreTrainedModel", - ] - -if is_tf_available(): - _import_structure["modeling_tf_data2vec_vision"] = [ - "TFData2VecVisionForImageClassification", - "TFData2VecVisionForSemanticSegmentation", - "TFData2VecVisionModel", - "TFData2VecVisionPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_data2vec_audio import Data2VecAudioConfig - from .configuration_data2vec_text import ( - Data2VecTextConfig, - Data2VecTextOnnxConfig, - ) - from .configuration_data2vec_vision import ( - Data2VecVisionConfig, - Data2VecVisionOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_data2vec_audio import ( - Data2VecAudioForAudioFrameClassification, - Data2VecAudioForCTC, - Data2VecAudioForSequenceClassification, - Data2VecAudioForXVector, - Data2VecAudioModel, - Data2VecAudioPreTrainedModel, - ) - from .modeling_data2vec_text import ( - Data2VecTextForCausalLM, - Data2VecTextForMaskedLM, - Data2VecTextForMultipleChoice, - Data2VecTextForQuestionAnswering, - Data2VecTextForSequenceClassification, - Data2VecTextForTokenClassification, - Data2VecTextModel, - Data2VecTextPreTrainedModel, - ) - from .modeling_data2vec_vision import ( - 
Data2VecVisionForImageClassification, - Data2VecVisionForMaskedImageModeling, - Data2VecVisionForSemanticSegmentation, - Data2VecVisionModel, - Data2VecVisionPreTrainedModel, - ) - if is_tf_available(): - from .modeling_tf_data2vec_vision import ( - TFData2VecVisionForImageClassification, - TFData2VecVisionForSemanticSegmentation, - TFData2VecVisionModel, - TFData2VecVisionPreTrainedModel, - ) - + from .configuration_data2vec_audio import * + from .configuration_data2vec_text import * + from .configuration_data2vec_vision import * + from .modeling_data2vec_audio import * + from .modeling_data2vec_text import * + from .modeling_data2vec_vision import * + from .modeling_tf_data2vec_vision import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/data2vec/configuration_data2vec_audio.py b/src/transformers/models/data2vec/configuration_data2vec_audio.py index 54754a8c798b..8066829027fb 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_audio.py +++ b/src/transformers/models/data2vec/configuration_data2vec_audio.py @@ -283,3 +283,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return math.prod(self.conv_stride) + + +__all__ = ["Data2VecAudioConfig"] diff --git a/src/transformers/models/data2vec/configuration_data2vec_text.py b/src/transformers/models/data2vec/configuration_data2vec_text.py index 6cd7b80c302e..3aa9a6b7bf22 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_text.py +++ b/src/transformers/models/data2vec/configuration_data2vec_text.py @@ -149,3 +149,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["Data2VecTextConfig", "Data2VecTextOnnxConfig"] diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py index d63a564cecfe..b822b03ef3eb 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_vision.py +++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py @@ -189,3 +189,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["Data2VecVisionConfig", "Data2VecVisionOnnxConfig"] diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py index dd2a676b26c2..3a163e896b1e 100755 --- a/src/transformers/models/data2vec/modeling_data2vec_audio.py +++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py @@ -1762,3 +1762,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Data2VecAudioPreTrainedModel", + "Data2VecAudioModel", + "Data2VecAudioForCTC", + "Data2VecAudioForSequenceClassification", + "Data2VecAudioForAudioFrameClassification", + "Data2VecAudioForXVector", +] diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py index a41fdfb56ed1..99c2ce860d8a 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_text.py +++ b/src/transformers/models/data2vec/modeling_data2vec_text.py @@ -1559,3 +1559,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = 
input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "Data2VecTextPreTrainedModel", + "Data2VecTextModel", + "Data2VecTextForCausalLM", + "Data2VecTextForMaskedLM", + "Data2VecTextForSequenceClassification", + "Data2VecTextForMultipleChoice", + "Data2VecTextForTokenClassification", + "Data2VecTextForQuestionAnswering", +] diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py index 4d252ce1f19d..d8134d780b5f 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py @@ -1372,3 +1372,11 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "Data2VecVisionPreTrainedModel", + "Data2VecVisionModel", + "Data2VecVisionForImageClassification", + "Data2VecVisionForSemanticSegmentation", +] diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py index f95360206bd1..b48f9f6be294 100644 --- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py @@ -1714,3 +1714,12 @@ def build(self, input_shape=None): if getattr(self, "fpn2", None) is not None: with tf.name_scope(self.fpn2[0].name): self.fpn2[0].build([None, None, None, self.config.hidden_size]) + + +__all__ = [ + "TFData2VecVisionPreTrainedModel", + "TFData2VecVisionModel", + "TFData2VecVisionForImageClassification", + "TFData2VecVisionForSemanticSegmentation", + "TFData2VecVisionMainLayer", +] diff --git a/src/transformers/models/dbrx/__init__.py b/src/transformers/models/dbrx/__init__.py index 693a544c4b3d..cce0f34c778d 100644 --- a/src/transformers/models/dbrx/__init__.py +++ b/src/transformers/models/dbrx/__init__.py @@ -13,39 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_dbrx": ["DbrxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dbrx"] = [ - "DbrxForCausalLM", - "DbrxModel", - "DbrxPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_dbrx import DbrxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel - - + from .configuration_dbrx import * + from .modeling_dbrx import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dbrx/configuration_dbrx.py b/src/transformers/models/dbrx/configuration_dbrx.py index dde5232ae5cc..886f00c930ab 100644 --- a/src/transformers/models/dbrx/configuration_dbrx.py +++ b/src/transformers/models/dbrx/configuration_dbrx.py @@ -256,3 +256,6 @@ def __init__( raise ValueError("tie_word_embeddings is not supported for DBRX models.") super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["DbrxConfig"] diff --git a/src/transformers/models/dbrx/modeling_dbrx.py b/src/transformers/models/dbrx/modeling_dbrx.py index 8db9f6e8b7d0..20200d8bf70c 100644 --- a/src/transformers/models/dbrx/modeling_dbrx.py +++ b/src/transformers/models/dbrx/modeling_dbrx.py @@ -1447,3 +1447,6 @@ def prepare_inputs_for_generation( } ) return model_inputs + + +__all__ = ["DbrxPreTrainedModel", "DbrxModel", "DbrxForCausalLM"] diff --git a/src/transformers/models/deberta/__init__.py b/src/transformers/models/deberta/__init__.py index 76beee798ff0..90109815faf1 100644 --- a/src/transformers/models/deberta/__init__.py +++ b/src/transformers/models/deberta/__init__.py @@ -11,106 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_deberta": ["DebertaConfig", "DebertaOnnxConfig"], - "tokenization_deberta": ["DebertaTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_deberta"] = [ - "DebertaForMaskedLM", - "DebertaForQuestionAnswering", - "DebertaForSequenceClassification", - "DebertaForTokenClassification", - "DebertaModel", - "DebertaPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_deberta"] = [ - "TFDebertaForMaskedLM", - "TFDebertaForQuestionAnswering", - "TFDebertaForSequenceClassification", - "TFDebertaForTokenClassification", - "TFDebertaModel", - "TFDebertaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_deberta import DebertaConfig, DebertaOnnxConfig - from .tokenization_deberta import DebertaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_deberta_fast import DebertaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_deberta import ( - DebertaForMaskedLM, - DebertaForQuestionAnswering, - DebertaForSequenceClassification, - DebertaForTokenClassification, - DebertaModel, - DebertaPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_deberta import ( - TFDebertaForMaskedLM, - TFDebertaForQuestionAnswering, - TFDebertaForSequenceClassification, - TFDebertaForTokenClassification, - TFDebertaModel, - TFDebertaPreTrainedModel, - ) - - + from .configuration_deberta import * + from .modeling_deberta import * + from .modeling_tf_deberta import * + from .tokenization_deberta import * + from .tokenization_deberta_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deberta/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py index f6f17ab2274c..f6ef27c85b26 100644 --- a/src/transformers/models/deberta/configuration_deberta.py +++ b/src/transformers/models/deberta/configuration_deberta.py @@ -23,7 +23,8 @@ if TYPE_CHECKING: - from ... 
import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType + from ...feature_extraction_utils import FeatureExtractionMixin + from ...tokenization_utils import PreTrainedTokenizerBase, TensorType logger = logging.get_logger(__name__) @@ -189,3 +190,6 @@ def generate_dummy_inputs( if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs + + +__all__ = ["DebertaConfig", "DebertaOnnxConfig"] diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 814d3cb28521..6485ad908713 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -1425,3 +1425,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "DebertaPreTrainedModel", + "DebertaModel", + "DebertaForMaskedLM", + "DebertaForSequenceClassification", + "DebertaForTokenClassification", + "DebertaForQuestionAnswering", +] diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index 3fa7bd4504a3..4690fa6cc96b 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -1039,7 +1039,6 @@ def build(self, input_shape=None): self.predictions.build(None) -# @keras_serializable class TFDebertaMainLayer(keras.layers.Layer): config_class = DebertaConfig @@ -1640,3 +1639,14 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFDebertaPreTrainedModel", + "TFDebertaModel", + "TFDebertaForMaskedLM", + "TFDebertaForSequenceClassification", + "TFDebertaForTokenClassification", + "TFDebertaForQuestionAnswering", + "TFDebertaMainLayer", +] diff --git a/src/transformers/models/deberta/tokenization_deberta.py b/src/transformers/models/deberta/tokenization_deberta.py index 371aa9866232..63933c1d2a32 100644 --- a/src/transformers/models/deberta/tokenization_deberta.py +++ b/src/transformers/models/deberta/tokenization_deberta.py @@ -391,3 +391,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["DebertaTokenizer"] diff --git a/src/transformers/models/deberta/tokenization_deberta_fast.py b/src/transformers/models/deberta/tokenization_deberta_fast.py index b28732850b17..39c64d90e533 100644 --- a/src/transformers/models/deberta/tokenization_deberta_fast.py +++ b/src/transformers/models/deberta/tokenization_deberta_fast.py @@ -245,3 +245,6 @@ def _encode_plus(self, *args, **kwargs) -> BatchEncoding: def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["DebertaTokenizerFast"] diff --git a/src/transformers/models/deberta_v2/__init__.py b/src/transformers/models/deberta_v2/__init__.py index 314901aee1ae..ffcb7c646e8c 100644 --- a/src/transformers/models/deberta_v2/__init__.py +++ b/src/transformers/models/deberta_v2/__init__.py @@ -11,112 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_deberta_v2": ["DebertaV2Config", "DebertaV2OnnxConfig"], - "tokenization_deberta_v2": ["DebertaV2Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_deberta_v2_fast"] = ["DebertaV2TokenizerFast"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_deberta_v2"] = [ - "TFDebertaV2ForMaskedLM", - "TFDebertaV2ForQuestionAnswering", - "TFDebertaV2ForMultipleChoice", - "TFDebertaV2ForSequenceClassification", - "TFDebertaV2ForTokenClassification", - "TFDebertaV2Model", - "TFDebertaV2PreTrainedModel", - ] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_deberta_v2"] = [ - "DebertaV2ForMaskedLM", - "DebertaV2ForMultipleChoice", - "DebertaV2ForQuestionAnswering", - "DebertaV2ForSequenceClassification", - "DebertaV2ForTokenClassification", - "DebertaV2Model", - "DebertaV2PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_deberta_v2 import ( - DebertaV2Config, - DebertaV2OnnxConfig, - ) - from .tokenization_deberta_v2 import DebertaV2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_deberta_v2_fast import DebertaV2TokenizerFast - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_deberta_v2 import ( - TFDebertaV2ForMaskedLM, - TFDebertaV2ForMultipleChoice, - TFDebertaV2ForQuestionAnswering, - TFDebertaV2ForSequenceClassification, - TFDebertaV2ForTokenClassification, - TFDebertaV2Model, - TFDebertaV2PreTrainedModel, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_deberta_v2 import ( - DebertaV2ForMaskedLM, - DebertaV2ForMultipleChoice, - DebertaV2ForQuestionAnswering, - DebertaV2ForSequenceClassification, - DebertaV2ForTokenClassification, - DebertaV2Model, - DebertaV2PreTrainedModel, - ) - + from .configuration_deberta_v2 import * + from .modeling_deberta_v2 import * + from .modeling_tf_deberta_v2 import * + from .tokenization_deberta_v2 import * + from .tokenization_deberta_v2_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deberta_v2/configuration_deberta_v2.py b/src/transformers/models/deberta_v2/configuration_deberta_v2.py index 80ab01241178..ad5c24416207 100644 --- a/src/transformers/models/deberta_v2/configuration_deberta_v2.py +++ b/src/transformers/models/deberta_v2/configuration_deberta_v2.py @@ -23,7 +23,8 @@ if 
TYPE_CHECKING: - from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType + from ...feature_extraction_utils import FeatureExtractionMixin + from ...tokenization_utils import PreTrainedTokenizerBase, TensorType logger = logging.get_logger(__name__) @@ -188,3 +189,6 @@ def generate_dummy_inputs( if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs + + +__all__ = ["DebertaV2Config", "DebertaV2OnnxConfig"] diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index f47cb86ab52a..f0675bcf107f 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -1628,3 +1628,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "DebertaV2PreTrainedModel", + "DebertaV2Model", + "DebertaV2ForMaskedLM", + "DebertaV2ForSequenceClassification", + "DebertaV2ForTokenClassification", + "DebertaV2ForQuestionAnswering", + "DebertaV2ForMultipleChoice", +] diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index fd8032f74794..8c4d1f43e787 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -1868,3 +1868,15 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.output_dim]) + + +__all__ = [ + "TFDebertaV2PreTrainedModel", + "TFDebertaV2Model", + "TFDebertaV2ForMaskedLM", + "TFDebertaV2ForSequenceClassification", + "TFDebertaV2ForTokenClassification", + "TFDebertaV2ForQuestionAnswering", + "TFDebertaV2ForMultipleChoice", + "TFDebertaV2MainLayer", +] diff --git a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py index 6ff689f80a5c..69c6ea587644 100644 --- a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py +++ b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -30,6 +31,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spm.model"} +@export(backends=("sentencepiece",)) class DebertaV2Tokenizer(PreTrainedTokenizer): r""" Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). 
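`@export(backends=("sentencepiece",))` on `DebertaV2Tokenizer` is the declarative replacement for the old try/except `OptionalDependencyNotAvailable` gating: the decorator records which backends a symbol needs, and `define_import_structure` can group it under the matching availability check when it builds the lazy import map. A sketch of the intended shape, with a hypothetical class (how a missing backend surfaces at access time is an assumption, not shown in this patch):

    from transformers.utils.import_utils import export

    @export(backends=("sentencepiece",))
    class MySentencePieceTokenizer:
        """The decorator only tags the class with its required backends;
        the availability check itself happens when the import map is built."""

    __all__ = ["MySentencePieceTokenizer"]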
@@ -519,3 +521,6 @@ def convert_to_unicode(text): return text.decode("utf-8", "ignore") else: raise TypeError(f"Unsupported string type: {type(text)}") + + +__all__ = ["DebertaV2Tokenizer"] diff --git a/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py b/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py index cb92a61edf1a..784e82995419 100644 --- a/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py +++ b/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py @@ -218,3 +218,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["DebertaV2TokenizerFast"] diff --git a/src/transformers/models/decision_transformer/__init__.py b/src/transformers/models/decision_transformer/__init__.py index ce97cf7352a7..1b526a2ff766 100644 --- a/src/transformers/models/decision_transformer/__init__.py +++ b/src/transformers/models/decision_transformer/__init__.py @@ -13,47 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_decision_transformer": ["DecisionTransformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_decision_transformer"] = [ - "DecisionTransformerGPT2Model", - "DecisionTransformerGPT2PreTrainedModel", - "DecisionTransformerModel", - "DecisionTransformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_decision_transformer import ( - DecisionTransformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_decision_transformer import ( - DecisionTransformerGPT2Model, - DecisionTransformerGPT2PreTrainedModel, - DecisionTransformerModel, - DecisionTransformerPreTrainedModel, - ) - - + from .configuration_decision_transformer import * + from .modeling_decision_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/decision_transformer/configuration_decision_transformer.py b/src/transformers/models/decision_transformer/configuration_decision_transformer.py index 19e89afecbfa..e677206aa089 100644 --- a/src/transformers/models/decision_transformer/configuration_decision_transformer.py +++ b/src/transformers/models/decision_transformer/configuration_decision_transformer.py @@ -152,3 +152,6 @@ def __init__( self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["DecisionTransformerConfig"] diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py index b8eb9f5a8b42..a937efcdb1b7 100755 --- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py +++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py @@ -931,3 +931,11 @@ def forward( 
hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "DecisionTransformerGPT2PreTrainedModel", + "DecisionTransformerGPT2Model", + "DecisionTransformerPreTrainedModel", + "DecisionTransformerModel", +] diff --git a/src/transformers/models/deformable_detr/__init__.py b/src/transformers/models/deformable_detr/__init__.py index ab44adf37181..5a4879fee505 100644 --- a/src/transformers/models/deformable_detr/__init__.py +++ b/src/transformers/models/deformable_detr/__init__.py @@ -11,63 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_deformable_detr": ["DeformableDetrConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"] - _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_deformable_detr"] = [ - "DeformableDetrForObjectDetection", - "DeformableDetrModel", - "DeformableDetrPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_deformable_detr import DeformableDetrConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor - from .image_processing_deformable_detr import DeformableDetrImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_deformable_detr import ( - DeformableDetrForObjectDetection, - DeformableDetrModel, - DeformableDetrPreTrainedModel, - ) - + from .configuration_deformable_detr import * + from .feature_extraction_deformable_detr import * + from .image_processing_deformable_detr import * + from .modeling_deformable_detr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deformable_detr/configuration_deformable_detr.py b/src/transformers/models/deformable_detr/configuration_deformable_detr.py index 495e1154dad3..05bd0f906a11 100644 --- a/src/transformers/models/deformable_detr/configuration_deformable_detr.py +++ b/src/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -277,3 +277,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["DeformableDetrConfig"] diff --git a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py index f04743e91cee..eb338665cf5e 100644 --- 
a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py +++ b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py @@ -18,6 +18,7 @@ from ...image_transforms import rgb_to_id as _rgb_to_id from ...utils import logging +from ...utils.import_utils import export from .image_processing_deformable_detr import DeformableDetrImageProcessor @@ -33,6 +34,7 @@ def rgb_to_id(x): return _rgb_to_id(x) +@export(backends=("vision",)) class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -41,3 +43,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["DeformableDetrFeatureExtractor"] diff --git a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py index 8c149f554965..6ca6feda9d2f 100644 --- a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py +++ b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py @@ -64,6 +64,7 @@ is_vision_available, logging, ) +from ...utils.import_utils import export if is_torch_available(): @@ -799,6 +800,7 @@ def compute_segments( return segmentation, segments +@export(backends=("vision",)) class DeformableDetrImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. @@ -1627,3 +1629,6 @@ def post_process_object_detection( results.append({"scores": score, "labels": label, "boxes": box}) return results + + +__all__ = ["DeformableDetrImageProcessor"] diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index 46e00787baf6..5a6f381b848b 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -2528,3 +2528,6 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask) + + +__all__ = ["DeformableDetrPreTrainedModel", "DeformableDetrModel", "DeformableDetrForObjectDetection"] diff --git a/src/transformers/models/deit/__init__.py b/src/transformers/models/deit/__init__.py index 8248823be24c..7497dc22a57f 100644 --- a/src/transformers/models/deit/__init__.py +++ b/src/transformers/models/deit/__init__.py @@ -13,97 +13,18 @@ # limitations under the License. 
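For the vision-gated processors the before/after lines up cleanly: availability logic that used to live in the package `__init__.py` now sits next to the class it protects. Roughly, with generic placeholder names rather than code from this patch:

    # Before: in the package __init__.py
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        _import_structure["image_processing_x"] = ["XImageProcessor"]

    # After: declared in image_processing_x.py itself
    @export(backends=("vision",))
    class XImageProcessor(BaseImageProcessor):
        ...

    __all__ = ["XImageProcessor"]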
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = {"configuration_deit": ["DeiTConfig", "DeiTOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"] - _import_structure["image_processing_deit"] = ["DeiTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_deit"] = [ - "DeiTForImageClassification", - "DeiTForImageClassificationWithTeacher", - "DeiTForMaskedImageModeling", - "DeiTModel", - "DeiTPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_deit"] = [ - "TFDeiTForImageClassification", - "TFDeiTForImageClassificationWithTeacher", - "TFDeiTForMaskedImageModeling", - "TFDeiTModel", - "TFDeiTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_deit import DeiTConfig, DeiTOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_deit import DeiTFeatureExtractor - from .image_processing_deit import DeiTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_deit import ( - DeiTForImageClassification, - DeiTForImageClassificationWithTeacher, - DeiTForMaskedImageModeling, - DeiTModel, - DeiTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_deit import ( - TFDeiTForImageClassification, - TFDeiTForImageClassificationWithTeacher, - TFDeiTForMaskedImageModeling, - TFDeiTModel, - TFDeiTPreTrainedModel, - ) - - + from .configuration_deit import * + from .feature_extraction_deit import * + from .image_processing_deit import * + from .modeling_deit import * + from .modeling_tf_deit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py index 3784ed76ab2a..d135144a2a40 100644 --- a/src/transformers/models/deit/configuration_deit.py +++ b/src/transformers/models/deit/configuration_deit.py @@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["DeiTConfig", "DeiTOnnxConfig"] diff --git a/src/transformers/models/deit/feature_extraction_deit.py b/src/transformers/models/deit/feature_extraction_deit.py index b66922ea9575..65f7e052c277 100644 --- a/src/transformers/models/deit/feature_extraction_deit.py +++ b/src/transformers/models/deit/feature_extraction_deit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import 
export from .image_processing_deit import DeiTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class DeiTFeatureExtractor(DeiTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["DeiTFeatureExtractor"] diff --git a/src/transformers/models/deit/image_processing_deit.py b/src/transformers/models/deit/image_processing_deit.py index bafb5f6e71ad..2c7ddec9ff1c 100644 --- a/src/transformers/models/deit/image_processing_deit.py +++ b/src/transformers/models/deit/image_processing_deit.py @@ -34,6 +34,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -43,6 +44,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class DeiTImageProcessor(BaseImageProcessor): r""" Constructs a DeiT image processor. @@ -294,3 +296,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["DeiTImageProcessor"] diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 03194c15d98f..4c9f5644b8b0 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -994,3 +994,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "DeiTPreTrainedModel", + "DeiTModel", + "DeiTForMaskedImageModeling", + "DeiTForImageClassification", + "DeiTForImageClassificationWithTeacher", +] diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py index 03ad1385d34c..961add294ae1 100644 --- a/src/transformers/models/deit/modeling_tf_deit.py +++ b/src/transformers/models/deit/modeling_tf_deit.py @@ -1222,3 +1222,13 @@ def build(self, input_shape=None): if getattr(self, "distillation_classifier", None) is not None: with tf.name_scope(self.distillation_classifier.name): self.distillation_classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFDeiTPreTrainedModel", + "TFDeiTModel", + "TFDeiTForMaskedImageModeling", + "TFDeiTForImageClassification", + "TFDeiTForImageClassificationWithTeacher", + "TFDeiTMainLayer", +] diff --git a/src/transformers/models/deprecated/deta/__init__.py b/src/transformers/models/deprecated/deta/__init__.py index ab54ec6f4391..6e06e1867419 100644 --- a/src/transformers/models/deprecated/deta/__init__.py +++ b/src/transformers/models/deprecated/deta/__init__.py @@ -11,61 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_deta": ["DetaConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_deta"] = ["DetaImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_deta"] = [ - "DetaForObjectDetection", - "DetaModel", - "DetaPreTrainedModel", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_deta import DetaConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_deta import DetaImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_deta import ( - DetaForObjectDetection, - DetaModel, - DetaPreTrainedModel, - ) - + from .configuration_deta import * + from .image_processing_deta import * + from .modeling_deta import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/deta/configuration_deta.py b/src/transformers/models/deprecated/deta/configuration_deta.py index fcee8fc62abf..558bf5967907 100644 --- a/src/transformers/models/deprecated/deta/configuration_deta.py +++ b/src/transformers/models/deprecated/deta/configuration_deta.py @@ -265,3 +265,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["DetaConfig"] diff --git a/src/transformers/models/deprecated/deta/image_processing_deta.py b/src/transformers/models/deprecated/deta/image_processing_deta.py index a548590ce12c..5785f64f2226 100644 --- a/src/transformers/models/deprecated/deta/image_processing_deta.py +++ b/src/transformers/models/deprecated/deta/image_processing_deta.py @@ -60,6 +60,7 @@ logging, ) from ....utils.generic import TensorType +from ....utils.import_utils import export if is_torch_available(): @@ -494,6 +495,7 @@ def resize_annotation( return new_annotation +@export(backends=("vision",)) class DetaImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. 
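The deprecated-model hunks follow the exact same recipe; the only visible difference is that packages under `models/deprecated/` sit one level deeper, so every relative import gains a dot:

    # models/<name>/__init__.py            ->  from ...utils import _LazyModule
    # models/deprecated/<name>/__init__.py ->  from ....utils import _LazyModule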
@@ -1222,3 +1224,6 @@ def post_process_object_detection( ) return results + + +__all__ = ["DetaImageProcessor"] diff --git a/src/transformers/models/deprecated/deta/modeling_deta.py b/src/transformers/models/deprecated/deta/modeling_deta.py index 075b490cfa7b..e7c80ff51b0e 100644 --- a/src/transformers/models/deprecated/deta/modeling_deta.py +++ b/src/transformers/models/deprecated/deta/modeling_deta.py @@ -2822,3 +2822,6 @@ def forward(self, outputs, targets): def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k) + + +__all__ = ["DetaPreTrainedModel", "DetaModel", "DetaForObjectDetection"] diff --git a/src/transformers/models/deprecated/efficientformer/__init__.py b/src/transformers/models/deprecated/efficientformer/__init__.py index 67d046a8b6fc..4bca57afdadd 100644 --- a/src/transformers/models/deprecated/efficientformer/__init__.py +++ b/src/transformers/models/deprecated/efficientformer/__init__.py @@ -20,81 +20,16 @@ is_torch_available, is_vision_available, ) +from ....utils.import_utils import define_import_structure -_import_structure = {"configuration_efficientformer": ["EfficientFormerConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_efficientformer"] = [ - "EfficientFormerForImageClassification", - "EfficientFormerForImageClassificationWithTeacher", - "EfficientFormerModel", - "EfficientFormerPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_efficientformer"] = [ - "TFEfficientFormerForImageClassification", - "TFEfficientFormerForImageClassificationWithTeacher", - "TFEfficientFormerModel", - "TFEfficientFormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_efficientformer import EfficientFormerConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_efficientformer import EfficientFormerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_efficientformer import ( - EfficientFormerForImageClassification, - EfficientFormerForImageClassificationWithTeacher, - EfficientFormerModel, - EfficientFormerPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_efficientformer import ( - TFEfficientFormerForImageClassification, - TFEfficientFormerForImageClassificationWithTeacher, - TFEfficientFormerModel, - TFEfficientFormerPreTrainedModel, - ) - + from .configuration_efficientformer import * + from .image_processing_efficientformer import * + from .modeling_efficientformer import * + from .modeling_tf_efficientformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff 
--git a/src/transformers/models/deprecated/efficientformer/configuration_efficientformer.py b/src/transformers/models/deprecated/efficientformer/configuration_efficientformer.py index fb161d61fcbc..abc4446a2716 100644 --- a/src/transformers/models/deprecated/efficientformer/configuration_efficientformer.py +++ b/src/transformers/models/deprecated/efficientformer/configuration_efficientformer.py @@ -165,3 +165,6 @@ def __init__( self.layer_scale_init_value = layer_scale_init_value self.image_size = image_size self.batch_norm_eps = batch_norm_eps + + +__all__ = ["EfficientFormerConfig"] diff --git a/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py b/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py index 15fdf04051c1..3b434fdcde1c 100644 --- a/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py +++ b/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py @@ -39,11 +39,13 @@ validate_preprocess_arguments, ) from ....utils import TensorType, logging +from ....utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class EfficientFormerImageProcessor(BaseImageProcessor): r""" Constructs a EfficientFormer image processor. @@ -319,3 +321,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["EfficientFormerImageProcessor"] diff --git a/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py b/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py index 306790021a7b..7fe308278e2b 100644 --- a/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py +++ b/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py @@ -797,3 +797,11 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "EfficientFormerPreTrainedModel", + "EfficientFormerModel", + "EfficientFormerForImageClassification", + "EfficientFormerForImageClassificationWithTeacher", +] diff --git a/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py b/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py index d47d06e7837c..97872d7c03e6 100644 --- a/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py +++ b/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py @@ -1188,3 +1188,12 @@ def build(self, input_shape=None): if hasattr(self.distillation_classifier, "name"): with tf.name_scope(self.distillation_classifier.name): self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]]) + + +__all__ = [ + "TFEfficientFormerPreTrainedModel", + "TFEfficientFormerModel", + "TFEfficientFormerForImageClassification", + "TFEfficientFormerForImageClassificationWithTeacher", + "TFEfficientFormerMainLayer", +] diff --git a/src/transformers/models/deprecated/ernie_m/__init__.py b/src/transformers/models/deprecated/ernie_m/__init__.py index 68964d7574fc..975faa359b5a 100644 --- a/src/transformers/models/deprecated/ernie_m/__init__.py +++ b/src/transformers/models/deprecated/ernie_m/__init__.py @@ -15,66 +15,15 @@ # rely on isort to merge the imports from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available - - -_import_structure = { - "configuration_ernie_m": ["ErnieMConfig"], -} - 
-try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_ernie_m"] = ["ErnieMTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_ernie_m"] = [ - "ErnieMForMultipleChoice", - "ErnieMForQuestionAnswering", - "ErnieMForSequenceClassification", - "ErnieMForTokenClassification", - "ErnieMModel", - "ErnieMPreTrainedModel", - "ErnieMForInformationExtraction", - ] +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_ernie_m import ErnieMConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_ernie_m import ErnieMTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_ernie_m import ( - ErnieMForInformationExtraction, - ErnieMForMultipleChoice, - ErnieMForQuestionAnswering, - ErnieMForSequenceClassification, - ErnieMForTokenClassification, - ErnieMModel, - ErnieMPreTrainedModel, - ) - - + from .configuration_ernie_m import * + from .modeling_ernie_m import * + from .tokenization_ernie_m import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py b/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py index d5c3feb951a3..7a4510613185 100644 --- a/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py +++ b/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py @@ -109,3 +109,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.act_dropout = act_dropout + + +__all__ = ["ErnieMConfig"] diff --git a/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py b/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py index 68d270874c91..2674e9fb3c72 100755 --- a/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py +++ b/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py @@ -962,6 +962,7 @@ def forward( compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""", ERNIE_M_START_DOCSTRING, ) +# Copied from paddlenlp.transformers.ernie_m.modeling.UIEM class ErnieMForInformationExtraction(ErnieMPreTrainedModel): def __init__(self, config): super(ErnieMForInformationExtraction, self).__init__(config) @@ -1045,3 +1046,14 @@ def forward( hidden_states=result.hidden_states, attentions=result.attentions, ) + + +__all__ = [ + "ErnieMPreTrainedModel", + "ErnieMModel", + "ErnieMForSequenceClassification", + "ErnieMForMultipleChoice", + "ErnieMForTokenClassification", + "ErnieMForQuestionAnswering", + "ErnieMForInformationExtraction", +] diff --git a/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py b/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py index 07f9f4ed4738..0d6c4b67126f 100644 --- a/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py +++ b/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py @@ 
-23,6 +23,7 @@ from ....tokenization_utils import PreTrainedTokenizer from ....utils import logging +from ....utils.import_utils import export logger = logging.get_logger(__name__) @@ -38,6 +39,7 @@ # Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer +@export(backends=("sentencepiece",)) class ErnieMTokenizer(PreTrainedTokenizer): r""" Constructs a Ernie-M tokenizer. It uses the `sentencepiece` tools to cut the words to sub-words. @@ -403,3 +405,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (vocab_file,) + + +__all__ = ["ErnieMTokenizer"] diff --git a/src/transformers/models/deprecated/gptsan_japanese/__init__.py b/src/transformers/models/deprecated/gptsan_japanese/__init__.py index 5bd0f99840ca..4ff26e55ea50 100644 --- a/src/transformers/models/deprecated/gptsan_japanese/__init__.py +++ b/src/transformers/models/deprecated/gptsan_japanese/__init__.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING from ....utils import ( @@ -21,48 +20,15 @@ is_tf_available, is_torch_available, ) - - -_import_structure = { - "configuration_gptsan_japanese": ["GPTSanJapaneseConfig"], - "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gptsan_japanese"] = [ - "GPTSanJapaneseForConditionalGeneration", - "GPTSanJapaneseModel", - "GPTSanJapanesePreTrainedModel", - ] - _import_structure["tokenization_gptsan_japanese"] = [ - "GPTSanJapaneseTokenizer", - ] +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gptsan_japanese import GPTSanJapaneseConfig - from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gptsan_japanese import ( - GPTSanJapaneseForConditionalGeneration, - GPTSanJapaneseModel, - GPTSanJapanesePreTrainedModel, - ) - from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer - - + from .configuration_gptsan_japanese import * + from .modeling_gptsan_japanese import * + from .tokenization_gptsan_japanese import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py b/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py index 52bd33ac9ff3..cd5658100959 100644 --- a/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py +++ b/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py @@ -152,3 +152,6 @@ def __init__( eos_token_id=eos_token_id, **kwargs, ) + + +__all__ = ["GPTSanJapaneseConfig"] diff --git a/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py index c7a195dbea0e..e864201f9364 100644 --- 
a/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py +++ b/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py @@ -1330,3 +1330,6 @@ def _unpack_router_logits(self, router_outputs): total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1) + + +__all__ = ["GPTSanJapanesePreTrainedModel", "GPTSanJapaneseModel", "GPTSanJapaneseForConditionalGeneration"] diff --git a/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py b/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py index f1331da83eec..b0707162c2ce 100644 --- a/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py +++ b/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py @@ -498,3 +498,6 @@ def checku2e(x): def convert_id_to_token(self, index): return self.ids_to_tokens[index][0] + + +__all__ = ["GPTSanJapaneseTokenizer"] diff --git a/src/transformers/models/deprecated/graphormer/__init__.py b/src/transformers/models/deprecated/graphormer/__init__.py index 117bf7c15a8a..3a4b3eb1be2b 100644 --- a/src/transformers/models/deprecated/graphormer/__init__.py +++ b/src/transformers/models/deprecated/graphormer/__init__.py @@ -13,43 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_graphormer": ["GraphormerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_graphormer"] = [ - "GraphormerForGraphClassification", - "GraphormerModel", - "GraphormerPreTrainedModel", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_graphormer import GraphormerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_graphormer import ( - GraphormerForGraphClassification, - GraphormerModel, - GraphormerPreTrainedModel, - ) - - + from .configuration_graphormer import * + from .modeling_graphormer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/graphormer/configuration_graphormer.py b/src/transformers/models/deprecated/graphormer/configuration_graphormer.py index 058ef9d03a40..3a542b17e37c 100644 --- a/src/transformers/models/deprecated/graphormer/configuration_graphormer.py +++ b/src/transformers/models/deprecated/graphormer/configuration_graphormer.py @@ -213,3 +213,6 @@ def __init__( eos_token_id=eos_token_id, **kwargs, ) + + +__all__ = ["GraphormerConfig"] diff --git a/src/transformers/models/deprecated/graphormer/modeling_graphormer.py b/src/transformers/models/deprecated/graphormer/modeling_graphormer.py index 0eb4aa71194c..6b29eaf06216 100755 --- a/src/transformers/models/deprecated/graphormer/modeling_graphormer.py +++ b/src/transformers/models/deprecated/graphormer/modeling_graphormer.py @@ -906,3 +906,6 @@ def 
forward( if not return_dict: return tuple(x for x in [loss, logits, hidden_states] if x is not None) return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None) + + +__all__ = ["GraphormerPreTrainedModel", "GraphormerModel", "GraphormerForGraphClassification"] diff --git a/src/transformers/models/deprecated/jukebox/__init__.py b/src/transformers/models/deprecated/jukebox/__init__.py index d6de90638905..826bdbddc1f1 100644 --- a/src/transformers/models/deprecated/jukebox/__init__.py +++ b/src/transformers/models/deprecated/jukebox/__init__.py @@ -11,56 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_jukebox": [ - "JukeboxConfig", - "JukeboxPriorConfig", - "JukeboxVQVAEConfig", - ], - "tokenization_jukebox": ["JukeboxTokenizer"], -} +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_jukebox"] = [ - "JukeboxModel", - "JukeboxPreTrainedModel", - "JukeboxVQVAE", - "JukeboxPrior", - ] if TYPE_CHECKING: - from .configuration_jukebox import ( - JukeboxConfig, - JukeboxPriorConfig, - JukeboxVQVAEConfig, - ) - from .tokenization_jukebox import JukeboxTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_jukebox import ( - JukeboxModel, - JukeboxPreTrainedModel, - JukeboxPrior, - JukeboxVQVAE, - ) - + from .configuration_jukebox import * + from .modeling_jukebox import * + from .tokenization_jukebox import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/jukebox/configuration_jukebox.py b/src/transformers/models/deprecated/jukebox/configuration_jukebox.py index e9d08c478f30..7e7438fdb593 100644 --- a/src/transformers/models/deprecated/jukebox/configuration_jukebox.py +++ b/src/transformers/models/deprecated/jukebox/configuration_jukebox.py @@ -608,3 +608,6 @@ def to_dict(self): result = super().to_dict() result["prior_config_list"] = [config.to_dict() for config in result.pop("prior_configs")] return result + + +__all__ = ["JukeboxPriorConfig", "JukeboxVQVAEConfig", "JukeboxConfig"] diff --git a/src/transformers/models/deprecated/jukebox/modeling_jukebox.py b/src/transformers/models/deprecated/jukebox/modeling_jukebox.py index 6688c79e71a2..27c14a653351 100755 --- a/src/transformers/models/deprecated/jukebox/modeling_jukebox.py +++ b/src/transformers/models/deprecated/jukebox/modeling_jukebox.py @@ -2661,3 +2661,6 @@ def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.Long ) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens + + +__all__ = ["JukeboxVQVAE", "JukeboxPrior", "JukeboxPreTrainedModel", "JukeboxModel"] diff --git a/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py 
b/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py index fb827fbca9b4..e08ab179a807 100644 --- a/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py +++ b/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py @@ -402,3 +402,6 @@ def _convert_id_to_token(self, artists_index, genres_index, lyric_index): genres = [self.genres_decoder.get(genre) for genre in genres_index] lyrics = [self.lyrics_decoder.get(character) for character in lyric_index] return artist, genres, lyrics + + +__all__ = ["JukeboxTokenizer"] diff --git a/src/transformers/models/deprecated/mega/__init__.py b/src/transformers/models/deprecated/mega/__init__.py index 1774d3bae4ea..cff2c19505f9 100644 --- a/src/transformers/models/deprecated/mega/__init__.py +++ b/src/transformers/models/deprecated/mega/__init__.py @@ -11,58 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ....utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mega": ["MegaConfig", "MegaOnnxConfig"], -} +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mega"] = [ - "MegaForCausalLM", - "MegaForMaskedLM", - "MegaForMultipleChoice", - "MegaForQuestionAnswering", - "MegaForSequenceClassification", - "MegaForTokenClassification", - "MegaModel", - "MegaPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_mega import MegaConfig, MegaOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mega import ( - MegaForCausalLM, - MegaForMaskedLM, - MegaForMultipleChoice, - MegaForQuestionAnswering, - MegaForSequenceClassification, - MegaForTokenClassification, - MegaModel, - MegaPreTrainedModel, - ) - + from .configuration_mega import * + from .modeling_mega import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/mega/configuration_mega.py b/src/transformers/models/deprecated/mega/configuration_mega.py index 0b1ab53d5f65..3b9d53d52079 100644 --- a/src/transformers/models/deprecated/mega/configuration_mega.py +++ b/src/transformers/models/deprecated/mega/configuration_mega.py @@ -238,3 +238,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["MegaConfig", "MegaOnnxConfig"] diff --git a/src/transformers/models/deprecated/mega/modeling_mega.py b/src/transformers/models/deprecated/mega/modeling_mega.py index 32f37dde5349..620786234c06 100644 --- a/src/transformers/models/deprecated/mega/modeling_mega.py +++ b/src/transformers/models/deprecated/mega/modeling_mega.py @@ -2271,3 +2271,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MegaPreTrainedModel", + "MegaModel", + "MegaForCausalLM", + "MegaForMaskedLM", + "MegaForSequenceClassification", + "MegaForMultipleChoice", + 
"MegaForTokenClassification", + "MegaForQuestionAnswering", +] diff --git a/src/transformers/models/deprecated/nat/__init__.py b/src/transformers/models/deprecated/nat/__init__.py index 70d2cfd2951a..c5373969ce78 100644 --- a/src/transformers/models/deprecated/nat/__init__.py +++ b/src/transformers/models/deprecated/nat/__init__.py @@ -13,42 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure -_import_structure = {"configuration_nat": ["NatConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nat"] = [ - "NatForImageClassification", - "NatModel", - "NatPreTrainedModel", - "NatBackbone", - ] - if TYPE_CHECKING: - from .configuration_nat import NatConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nat import ( - NatBackbone, - NatForImageClassification, - NatModel, - NatPreTrainedModel, - ) - + from .configuration_nat import * + from .modeling_nat import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/nat/configuration_nat.py b/src/transformers/models/deprecated/nat/configuration_nat.py index 2fef74d2a016..85961aa2fe8d 100644 --- a/src/transformers/models/deprecated/nat/configuration_nat.py +++ b/src/transformers/models/deprecated/nat/configuration_nat.py @@ -143,3 +143,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["NatConfig"] diff --git a/src/transformers/models/deprecated/nat/modeling_nat.py b/src/transformers/models/deprecated/nat/modeling_nat.py index b3827f3787ef..585889cb1730 100644 --- a/src/transformers/models/deprecated/nat/modeling_nat.py +++ b/src/transformers/models/deprecated/nat/modeling_nat.py @@ -948,3 +948,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["NatPreTrainedModel", "NatModel", "NatForImageClassification", "NatBackbone"] diff --git a/src/transformers/models/deprecated/nezha/__init__.py b/src/transformers/models/deprecated/nezha/__init__.py index 590b0013c52d..f0690129ae9e 100644 --- a/src/transformers/models/deprecated/nezha/__init__.py +++ b/src/transformers/models/deprecated/nezha/__init__.py @@ -13,55 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_nezha": ["NezhaConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nezha"] = [ - "NezhaForNextSentencePrediction", - "NezhaForMaskedLM", - "NezhaForPreTraining", - "NezhaForMultipleChoice", - "NezhaForQuestionAnswering", - "NezhaForSequenceClassification", - "NezhaForTokenClassification", - "NezhaModel", - "NezhaPreTrainedModel", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nezha import NezhaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nezha import ( - NezhaForMaskedLM, - NezhaForMultipleChoice, - NezhaForNextSentencePrediction, - NezhaForPreTraining, - NezhaForQuestionAnswering, - NezhaForSequenceClassification, - NezhaForTokenClassification, - NezhaModel, - NezhaPreTrainedModel, - ) - - + from .configuration_nezha import * + from .modeling_nezha import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/nezha/configuration_nezha.py b/src/transformers/models/deprecated/nezha/configuration_nezha.py index c60bb5de51f4..da750bcb23ec 100644 --- a/src/transformers/models/deprecated/nezha/configuration_nezha.py +++ b/src/transformers/models/deprecated/nezha/configuration_nezha.py @@ -1,4 +1,4 @@ -from .... import PretrainedConfig +from ....configuration_utils import PretrainedConfig class NezhaConfig(PretrainedConfig): @@ -100,3 +100,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.use_cache = use_cache + + +__all__ = ["NezhaConfig"] diff --git a/src/transformers/models/deprecated/nezha/modeling_nezha.py b/src/transformers/models/deprecated/nezha/modeling_nezha.py index 3346a4f835a3..2c9aa3a7ce7f 100644 --- a/src/transformers/models/deprecated/nezha/modeling_nezha.py +++ b/src/transformers/models/deprecated/nezha/modeling_nezha.py @@ -1682,3 +1682,16 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "NezhaPreTrainedModel", + "NezhaModel", + "NezhaForPreTraining", + "NezhaForMaskedLM", + "NezhaForNextSentencePrediction", + "NezhaForSequenceClassification", + "NezhaForMultipleChoice", + "NezhaForTokenClassification", + "NezhaForQuestionAnswering", +] diff --git a/src/transformers/models/deprecated/qdqbert/__init__.py b/src/transformers/models/deprecated/qdqbert/__init__.py index 06e69cdc1fd5..864b321bc2ee 100644 --- a/src/transformers/models/deprecated/qdqbert/__init__.py +++ b/src/transformers/models/deprecated/qdqbert/__init__.py @@ -13,57 +13,15 @@ # limitations under the License. 
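These hunks only show call sites; the body of `define_import_structure` lands in `src/transformers/utils/import_utils.py` per the diffstat at the top of the patch. As a rough mental model only — an assumption, not the shipped implementation — it walks the package next to the given `__init__.py` and rebuilds the mapping the hand-written `_import_structure` dicts used to spell out, reading each submodule's `__all__`:

    # Hypothetical sketch of what define_import_structure computes; the real
    # implementation also honors the @export(backends=...) registrations that
    # appear later in this patch.
    import ast
    import os


    def sketch_define_import_structure(init_file):
        """Map each sibling submodule to the names its __all__ declares."""
        structure = {}
        package_dir = os.path.dirname(init_file)
        for filename in sorted(os.listdir(package_dir)):
            if not filename.endswith(".py") or filename == "__init__.py":
                continue
            with open(os.path.join(package_dir, filename), encoding="utf-8") as f:
                tree = ast.parse(f.read())
            for node in tree.body:
                is_all = isinstance(node, ast.Assign) and any(
                    isinstance(target, ast.Name) and target.id == "__all__"
                    for target in node.targets
                )
                if is_all:
                    # Assumes a literal list of strings, as in the hunks above.
                    structure[filename[:-3]] = [ast.literal_eval(elt) for elt in node.value.elts]
        return structure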
from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_qdqbert": ["QDQBertConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qdqbert"] = [ - "QDQBertForMaskedLM", - "QDQBertForMultipleChoice", - "QDQBertForNextSentencePrediction", - "QDQBertForQuestionAnswering", - "QDQBertForSequenceClassification", - "QDQBertForTokenClassification", - "QDQBertLayer", - "QDQBertLMHeadModel", - "QDQBertModel", - "QDQBertPreTrainedModel", - "load_tf_weights_in_qdqbert", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qdqbert import QDQBertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qdqbert import ( - QDQBertForMaskedLM, - QDQBertForMultipleChoice, - QDQBertForNextSentencePrediction, - QDQBertForQuestionAnswering, - QDQBertForSequenceClassification, - QDQBertForTokenClassification, - QDQBertLayer, - QDQBertLMHeadModel, - QDQBertModel, - QDQBertPreTrainedModel, - load_tf_weights_in_qdqbert, - ) - - + from .configuration_qdqbert import * + from .modeling_qdqbert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py b/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py index b2ba629b2407..91ac82bc5a02 100644 --- a/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py +++ b/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py @@ -118,3 +118,6 @@ def __init__( self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache + + +__all__ = ["QDQBertConfig"] diff --git a/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py b/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py index 036ca99c73b5..b907465d4fdd 100755 --- a/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py +++ b/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py @@ -718,6 +718,8 @@ def forward(self, sequence_output, pooled_output): # Based on transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert -> QDQBert + + class QDQBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1732,3 +1734,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_qdqbert", + "QDQBertPreTrainedModel", + "QDQBertModel", + "QDQBertLMHeadModel", + "QDQBertForMaskedLM", + "QDQBertForNextSentencePrediction", + "QDQBertForSequenceClassification", + "QDQBertForMultipleChoice", + "QDQBertForTokenClassification", + "QDQBertForQuestionAnswering", +] diff --git a/src/transformers/models/deprecated/realm/__init__.py b/src/transformers/models/deprecated/realm/__init__.py index 85fe72441fd1..cdfdeb5d179c 100644 --- a/src/transformers/models/deprecated/realm/__init__.py +++ b/src/transformers/models/deprecated/realm/__init__.py @@ -13,71 +13,18 @@ 
# limitations under the License. from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_realm": ["RealmConfig"], - "tokenization_realm": ["RealmTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_realm_fast"] = ["RealmTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_realm"] = [ - "RealmEmbedder", - "RealmForOpenQA", - "RealmKnowledgeAugEncoder", - "RealmPreTrainedModel", - "RealmReader", - "RealmScorer", - "load_tf_weights_in_realm", - ] - _import_structure["retrieval_realm"] = ["RealmRetriever"] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_realm import RealmConfig - from .tokenization_realm import RealmTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_realm import RealmTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_realm import ( - RealmEmbedder, - RealmForOpenQA, - RealmKnowledgeAugEncoder, - RealmPreTrainedModel, - RealmReader, - RealmScorer, - load_tf_weights_in_realm, - ) - from .retrieval_realm import RealmRetriever - - + from .configuration_realm import * + from .modeling_realm import * + from .retrieval_realm import * + from .tokenization_realm import * + from .tokenization_realm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/realm/configuration_realm.py b/src/transformers/models/deprecated/realm/configuration_realm.py index 20fd201d98f1..fbf32378a604 100644 --- a/src/transformers/models/deprecated/realm/configuration_realm.py +++ b/src/transformers/models/deprecated/realm/configuration_realm.py @@ -164,3 +164,6 @@ def __init__( # Retrieval config self.num_block_records = num_block_records self.searcher_beam_size = searcher_beam_size + + +__all__ = ["RealmConfig"] diff --git a/src/transformers/models/deprecated/realm/modeling_realm.py b/src/transformers/models/deprecated/realm/modeling_realm.py index 67eb94c6c4e8..66d79f0b4066 100644 --- a/src/transformers/models/deprecated/realm/modeling_realm.py +++ b/src/transformers/models/deprecated/realm/modeling_realm.py @@ -1849,3 +1849,14 @@ def forward( reader_output=reader_output, predicted_answer_ids=predicted_answer_ids, ) + + +__all__ = [ + "load_tf_weights_in_realm", + "RealmPreTrainedModel", + "RealmEmbedder", + "RealmScorer", + "RealmKnowledgeAugEncoder", + "RealmReader", + "RealmForOpenQA", +] diff --git a/src/transformers/models/deprecated/realm/retrieval_realm.py b/src/transformers/models/deprecated/realm/retrieval_realm.py index 4bfa2106c65c..8c749d2e8b81 100644 --- a/src/transformers/models/deprecated/realm/retrieval_realm.py +++ b/src/transformers/models/deprecated/realm/retrieval_realm.py @@ -162,3 +162,6 @@ def 
block_has_answer(self, concat_inputs, answer_ids): start_pos_ += padded end_pos_ += padded return has_answers, start_pos, end_pos + + +__all__ = ["RealmRetriever"] diff --git a/src/transformers/models/deprecated/realm/tokenization_realm.py b/src/transformers/models/deprecated/realm/tokenization_realm.py index 8211c1aee870..70e69bc4bc2b 100644 --- a/src/transformers/models/deprecated/realm/tokenization_realm.py +++ b/src/transformers/models/deprecated/realm/tokenization_realm.py @@ -558,3 +558,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["RealmTokenizer"] diff --git a/src/transformers/models/deprecated/realm/tokenization_realm_fast.py b/src/transformers/models/deprecated/realm/tokenization_realm_fast.py index cbc4869e549e..7c173227befd 100644 --- a/src/transformers/models/deprecated/realm/tokenization_realm_fast.py +++ b/src/transformers/models/deprecated/realm/tokenization_realm_fast.py @@ -247,3 +247,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["RealmTokenizerFast"] diff --git a/src/transformers/models/deprecated/speech_to_text_2/__init__.py b/src/transformers/models/deprecated/speech_to_text_2/__init__.py index 53f806d00c68..78c549b6e294 100644 --- a/src/transformers/models/deprecated/speech_to_text_2/__init__.py +++ b/src/transformers/models/deprecated/speech_to_text_2/__init__.py @@ -13,51 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ....utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_speech_available, - is_torch_available, -) - - -_import_structure = { - "configuration_speech_to_text_2": ["Speech2Text2Config"], - "processing_speech_to_text_2": ["Speech2Text2Processor"], - "tokenization_speech_to_text_2": ["Speech2Text2Tokenizer"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_speech_to_text_2"] = [ - "Speech2Text2ForCausalLM", - "Speech2Text2PreTrainedModel", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_speech_to_text_2 import Speech2Text2Config - from .processing_speech_to_text_2 import Speech2Text2Processor - from .tokenization_speech_to_text_2 import Speech2Text2Tokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_speech_to_text_2 import ( - Speech2Text2ForCausalLM, - Speech2Text2PreTrainedModel, - ) - + from .configuration_speech_to_text_2 import * + from .modeling_speech_to_text_2 import * + from .processing_speech_to_text_2 import * + from .tokenization_speech_to_text_2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py b/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py index d876c4fc3ecf..2afd79feb28d 100644 --- 
a/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py +++ b/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py @@ -129,3 +129,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["Speech2Text2Config"] diff --git a/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py b/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py index 8f1a8370933c..68a41169ac41 100755 --- a/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py +++ b/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py @@ -921,3 +921,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["Speech2Text2PreTrainedModel", "Speech2Text2ForCausalLM"] diff --git a/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py b/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py index ce8527e4a72e..7ac144e0993f 100644 --- a/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py +++ b/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py @@ -114,3 +114,6 @@ def as_target_processor(self): yield self.current_processor = self.feature_extractor self._in_target_context_manager = False + + +__all__ = ["Speech2Text2Processor"] diff --git a/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py b/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py index 2eefe449151b..f5aa7ef8067c 100644 --- a/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py +++ b/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py @@ -247,3 +247,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return (vocab_file, merges_file) + + +__all__ = ["Speech2Text2Tokenizer"] diff --git a/src/transformers/models/deprecated/tvlt/__init__.py b/src/transformers/models/deprecated/tvlt/__init__.py index 0a2f1e393494..f5c8dc7862da 100644 --- a/src/transformers/models/deprecated/tvlt/__init__.py +++ b/src/transformers/models/deprecated/tvlt/__init__.py @@ -1,86 +1,20 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
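None of these conversions changes caller-facing behavior: the package module is still swapped for a `_LazyModule`, so the heavy import is deferred until first attribute access. A small usage sketch (my example, not from the patch; config classes need no optional backend):

    # Importing the package is cheap; the submodule import only happens when
    # the attribute is first touched through the _LazyModule proxy.
    from transformers.models.deprecated.speech_to_text_2 import Speech2Text2Config

    config = Speech2Text2Config()
    print(config.model_type)  # prints the registered model type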
from typing import TYPE_CHECKING -from ....utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_tvlt": ["TvltConfig"], - "feature_extraction_tvlt": ["TvltFeatureExtractor"], - "processing_tvlt": ["TvltProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tvlt"] = [ - "TvltModel", - "TvltForPreTraining", - "TvltForAudioVisualClassification", - "TvltPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_tvlt"] = ["TvltImageProcessor"] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_tvlt import TvltConfig - from .processing_tvlt import TvltProcessor - from .feature_extraction_tvlt import TvltFeatureExtractor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tvlt import ( - TvltForAudioVisualClassification, - TvltForPreTraining, - TvltModel, - TvltPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_tvlt import TvltImageProcessor - - + from .processing_tvlt import * + from .configuration_tvlt import * + from .feature_extraction_tvlt import * + from .modeling_tvlt import * + from .image_processing_tvlt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/tvlt/configuration_tvlt.py b/src/transformers/models/deprecated/tvlt/configuration_tvlt.py index bc9c133beca3..bf159fa7e0b7 100644 --- a/src/transformers/models/deprecated/tvlt/configuration_tvlt.py +++ b/src/transformers/models/deprecated/tvlt/configuration_tvlt.py @@ -182,3 +182,6 @@ def __init__( self.task_matching = task_matching self.task_mae = task_mae self.loss_type = loss_type + + +__all__ = ["TvltConfig"] diff --git a/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py b/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py index 2d41af33e548..bbbfac9031b9 100644 --- a/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py +++ b/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py @@ -228,3 +228,6 @@ def __call__( encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs + + +__all__ = ["TvltFeatureExtractor"] diff --git a/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py b/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py index 009f8307d475..32da572f3929 100644 --- a/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py +++ b/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py @@ -39,6 +39,7 @@ validate_preprocess_arguments, ) from ....utils import TensorType, logging +from ....utils.import_utils import export logger = logging.get_logger(__name__) @@ -67,6 +68,7 @@ def make_batched(videos) -> List[List[ImageInput]]: raise ValueError(f"Could not make 
batched video from {videos}") +@export(backends=("vision",)) class TvltImageProcessor(BaseImageProcessor): r""" Constructs a TVLT image processor. @@ -433,3 +435,6 @@ def preprocess( data = {"pixel_values": videos, "pixel_mask": video_masks} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["TvltImageProcessor"] diff --git a/src/transformers/models/deprecated/tvlt/modeling_tvlt.py b/src/transformers/models/deprecated/tvlt/modeling_tvlt.py index 7f82aacf6e8b..ba18f901ba6d 100644 --- a/src/transformers/models/deprecated/tvlt/modeling_tvlt.py +++ b/src/transformers/models/deprecated/tvlt/modeling_tvlt.py @@ -1286,3 +1286,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["TvltPreTrainedModel", "TvltModel", "TvltForPreTraining", "TvltForAudioVisualClassification"] diff --git a/src/transformers/models/deprecated/tvlt/processing_tvlt.py b/src/transformers/models/deprecated/tvlt/processing_tvlt.py index da9c755b55ed..d9f8e0978d8a 100644 --- a/src/transformers/models/deprecated/tvlt/processing_tvlt.py +++ b/src/transformers/models/deprecated/tvlt/processing_tvlt.py @@ -87,3 +87,6 @@ def model_input_names(self): image_processor_input_names = self.image_processor.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names)) + + +__all__ = ["TvltProcessor"] diff --git a/src/transformers/models/deprecated/vit_hybrid/__init__.py b/src/transformers/models/deprecated/vit_hybrid/__init__.py index d0f9c5831d84..f5bd93aa4dab 100644 --- a/src/transformers/models/deprecated/vit_hybrid/__init__.py +++ b/src/transformers/models/deprecated/vit_hybrid/__init__.py @@ -13,57 +13,16 @@ # limitations under the License. 
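The `@export` decorator applied to `TvltImageProcessor` above is the counterpart mechanism for backend-gated symbols: the requirement is declared at the definition site instead of via `is_vision_available()` guards in `__init__.py`. Its implementation is not shown in these hunks; a loose stand-in under that assumption, just to fix intuition:

    # Hypothetical stand-in for utils.import_utils.export; the shipped
    # decorator lives in src/transformers/utils/import_utils.py.
    def export(*, backends=()):
        """Tag an object with the optional backends it requires."""

        def decorator(obj):
            # Recorded so the import-structure builder can group this symbol
            # under the right backend requirements.
            obj.__backends__ = backends
            return obj

        return decorator

Call sites in this patch use it exactly as shown: `@export(backends=("vision",))` on image processors and feature extractors, and `@export(backends=("sentencepiece",))` on the XLMProphetNet tokenizer further down.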
from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_vit_hybrid": ["ViTHybridConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vit_hybrid"] = [ - "ViTHybridForImageClassification", - "ViTHybridModel", - "ViTHybridPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_vit_hybrid"] = ["ViTHybridImageProcessor"] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vit_hybrid import ViTHybridConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vit_hybrid import ( - ViTHybridForImageClassification, - ViTHybridModel, - ViTHybridPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_vit_hybrid import ViTHybridImageProcessor - - + from .configuration_vit_hybrid import * + from .image_processing_vit_hybrid import * + from .modeling_vit_hybrid import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py b/src/transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py index c0e4244a5a2b..65b6a3e5ef51 100644 --- a/src/transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py +++ b/src/transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py @@ -167,3 +167,6 @@ def __init__( self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias + + +__all__ = ["ViTHybridConfig"] diff --git a/src/transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py b/src/transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py index e7c3193ceab4..2c0586dbcf34 100644 --- a/src/transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py +++ b/src/transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py @@ -40,6 +40,7 @@ validate_preprocess_arguments, ) from ....utils import TensorType, is_vision_available, logging +from ....utils.import_utils import export logger = logging.get_logger(__name__) @@ -49,6 +50,7 @@ import PIL +@export(backends=("vision",)) class ViTHybridImageProcessor(BaseImageProcessor): r""" Constructs a ViT Hybrid image processor. 
@@ -336,3 +338,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["ViTHybridImageProcessor"] diff --git a/src/transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py b/src/transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py index dca17adf2b09..09c37506b633 100644 --- a/src/transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py +++ b/src/transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py @@ -765,3 +765,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ViTHybridPreTrainedModel", "ViTHybridModel", "ViTHybridForImageClassification"] diff --git a/src/transformers/models/deprecated/xlm_prophetnet/__init__.py b/src/transformers/models/deprecated/xlm_prophetnet/__init__.py index 850d2958cb49..c13c67012fa1 100644 --- a/src/transformers/models/deprecated/xlm_prophetnet/__init__.py +++ b/src/transformers/models/deprecated/xlm_prophetnet/__init__.py @@ -13,64 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available - - -_import_structure = { - "configuration_xlm_prophetnet": ["XLMProphetNetConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlm_prophetnet"] = ["XLMProphetNetTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm_prophetnet"] = [ - "XLMProphetNetDecoder", - "XLMProphetNetEncoder", - "XLMProphetNetForCausalLM", - "XLMProphetNetForConditionalGeneration", - "XLMProphetNetModel", - "XLMProphetNetPreTrainedModel", - ] +from ....utils import _LazyModule +from ....utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xlm_prophetnet import XLMProphetNetConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm_prophetnet import ( - XLMProphetNetDecoder, - XLMProphetNetEncoder, - XLMProphetNetForCausalLM, - XLMProphetNetForConditionalGeneration, - XLMProphetNetModel, - XLMProphetNetPreTrainedModel, - ) - + from .configuration_xlm_prophetnet import * + from .modeling_xlm_prophetnet import * + from .tokenization_xlm_prophetnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py b/src/transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py index 5d3f63670f0c..2d7751d9541e 100644 --- a/src/transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py +++ b/src/transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -176,3 +176,6 @@ def num_hidden_layers(self, value): "This model does not support the setting of 
`num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." ) + + +__all__ = ["XLMProphetNetConfig"] diff --git a/src/transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py b/src/transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py index e9e709af993d..770ba7e42ca1 100644 --- a/src/transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py +++ b/src/transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -2334,3 +2334,13 @@ def _tie_weights(self): def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) + + +__all__ = [ + "XLMProphetNetPreTrainedModel", + "XLMProphetNetEncoder", + "XLMProphetNetDecoder", + "XLMProphetNetModel", + "XLMProphetNetForConditionalGeneration", + "XLMProphetNetForCausalLM", +] diff --git a/src/transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py b/src/transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py index 87f458001988..d5939a807e57 100644 --- a/src/transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py +++ b/src/transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py @@ -20,6 +20,7 @@ from ....tokenization_utils import PreTrainedTokenizer from ....utils import logging +from ....utils.import_utils import export logger = logging.get_logger(__name__) @@ -40,6 +41,7 @@ def load_vocab(vocab_file): return vocab +@export(backends=("sentencepiece",)) class XLMProphetNetTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on @@ -321,3 +323,6 @@ def build_inputs_with_special_tokens( return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep + + +__all__ = ["XLMProphetNetTokenizer"] diff --git a/src/transformers/models/depth_anything/__init__.py b/src/transformers/models/depth_anything/__init__.py index 0640e211259f..7425e37e0399 100644 --- a/src/transformers/models/depth_anything/__init__.py +++ b/src/transformers/models/depth_anything/__init__.py @@ -13,40 +13,15 @@ # limitations under the License. 
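With `__all__` as the single source of truth, the lazy package should advertise exactly the names its submodules declare. An illustrative spot check of the xlm_prophetnet conversion above (my snippet, not from the patch; it assumes `_LazyModule` keeps exposing lazily importable names through `dir()`):

    # Illustrative: the lazy package lists names gathered from its
    # submodules' __all__ declarations, without importing torch up front.
    import transformers.models.deprecated.xlm_prophetnet as xlm_prophetnet

    assert "XLMProphetNetConfig" in dir(xlm_prophetnet)
    config_cls = xlm_prophetnet.XLMProphetNetConfig  # deferred import fires here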
from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_depth_anything": ["DepthAnythingConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_depth_anything"] = [ - "DepthAnythingForDepthEstimation", - "DepthAnythingPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_depth_anything import DepthAnythingConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_depth_anything import ( - DepthAnythingForDepthEstimation, - DepthAnythingPreTrainedModel, - ) - - + from .configuration_depth_anything import * + from .modeling_depth_anything import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/depth_anything/configuration_depth_anything.py b/src/transformers/models/depth_anything/configuration_depth_anything.py index e1b472bdce19..3bbe621a4431 100644 --- a/src/transformers/models/depth_anything/configuration_depth_anything.py +++ b/src/transformers/models/depth_anything/configuration_depth_anything.py @@ -163,3 +163,6 @@ def to_dict(self): output["model_type"] = self.__class__.model_type return output + + +__all__ = ["DepthAnythingConfig"] diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index e24b38be6466..e6f6345ac403 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -465,3 +465,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["DepthAnythingPreTrainedModel", "DepthAnythingForDepthEstimation"] diff --git a/src/transformers/models/detr/__init__.py b/src/transformers/models/detr/__init__.py index 422fe98230be..41223f07da18 100644 --- a/src/transformers/models/detr/__init__.py +++ b/src/transformers/models/detr/__init__.py @@ -11,63 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_detr": ["DetrConfig", "DetrOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_detr"] = ["DetrFeatureExtractor"] - _import_structure["image_processing_detr"] = ["DetrImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_detr"] = [ - "DetrForObjectDetection", - "DetrForSegmentation", - "DetrModel", - "DetrPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_detr import DetrConfig, DetrOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_detr import DetrFeatureExtractor - from .image_processing_detr import DetrImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_detr import ( - DetrForObjectDetection, - DetrForSegmentation, - DetrModel, - DetrPreTrainedModel, - ) - + from .configuration_detr import * + from .feature_extraction_detr import * + from .image_processing_detr import * + from .modeling_detr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index 8b4a5b08dab2..90cd3b1345e3 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -284,3 +284,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["DetrConfig", "DetrOnnxConfig"] diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 6ea33666466f..42adea16ea3c 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -18,6 +18,7 @@ from ...image_transforms import rgb_to_id as _rgb_to_id from ...utils import logging +from ...utils.import_utils import export from .image_processing_detr import DetrImageProcessor @@ -33,6 +34,7 @@ def rgb_to_id(x): return _rgb_to_id(x) +@export(backends=("vision",)) class DetrFeatureExtractor(DetrImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -41,3 +43,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["DetrFeatureExtractor"] diff --git a/src/transformers/models/detr/image_processing_detr.py b/src/transformers/models/detr/image_processing_detr.py index 10d1b4d5d4a5..fb337385b026 100644 --- a/src/transformers/models/detr/image_processing_detr.py +++ b/src/transformers/models/detr/image_processing_detr.py @@ -63,6 +63,7 @@ is_vision_available, logging, ) +from ...utils.import_utils import export if is_torch_available(): 
@@ -784,6 +785,7 @@ def compute_segments( return segmentation, segments +@export(backends=("vision",)) class DetrImageProcessor(BaseImageProcessor): r""" Constructs a Detr image processor. @@ -2042,3 +2044,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["DetrImageProcessor"] diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index c3c1c033e556..cb0999a4f4b6 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -2328,3 +2328,6 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask) + + +__all__ = ["DetrPreTrainedModel", "DetrModel", "DetrForObjectDetection", "DetrForSegmentation"] diff --git a/src/transformers/models/dinat/__init__.py b/src/transformers/models/dinat/__init__.py index 207ebfdaa869..a6c8481bc208 100644 --- a/src/transformers/models/dinat/__init__.py +++ b/src/transformers/models/dinat/__init__.py @@ -13,42 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_dinat": ["DinatConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dinat"] = [ - "DinatForImageClassification", - "DinatModel", - "DinatPreTrainedModel", - "DinatBackbone", - ] - if TYPE_CHECKING: - from .configuration_dinat import DinatConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dinat import ( - DinatBackbone, - DinatForImageClassification, - DinatModel, - DinatPreTrainedModel, - ) - + from .configuration_dinat import * + from .modeling_dinat import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dinat/configuration_dinat.py b/src/transformers/models/dinat/configuration_dinat.py index 220561152b35..7b432e37c851 100644 --- a/src/transformers/models/dinat/configuration_dinat.py +++ b/src/transformers/models/dinat/configuration_dinat.py @@ -147,3 +147,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["DinatConfig"] diff --git a/src/transformers/models/dinat/modeling_dinat.py b/src/transformers/models/dinat/modeling_dinat.py index 18f8725da861..3a125169f431 100644 --- a/src/transformers/models/dinat/modeling_dinat.py +++ b/src/transformers/models/dinat/modeling_dinat.py @@ -955,3 +955,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["DinatPreTrainedModel", "DinatModel", "DinatForImageClassification", "DinatBackbone"] diff --git a/src/transformers/models/dinov2/__init__.py b/src/transformers/models/dinov2/__init__.py index 
1bb4a4597b9a..4e1f3fa9ee29 100644 --- a/src/transformers/models/dinov2/__init__.py +++ b/src/transformers/models/dinov2/__init__.py @@ -13,70 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_dinov2": ["Dinov2Config", "Dinov2OnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dinov2"] = [ - "Dinov2ForImageClassification", - "Dinov2Model", - "Dinov2PreTrainedModel", - "Dinov2Backbone", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_dinov2"] = [ - "FlaxDinov2ForImageClassification", - "FlaxDinov2Model", - "FlaxDinov2PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_dinov2 import Dinov2Config, Dinov2OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dinov2 import ( - Dinov2Backbone, - Dinov2ForImageClassification, - Dinov2Model, - Dinov2PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_dinov2 import ( - FlaxDinov2ForImageClassification, - FlaxDinov2Model, - FlaxDinov2PreTrainedModel, - ) - + from .configuration_dinov2 import * + from .modeling_dinov2 import * + from .modeling_flax_dinov2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dinov2/configuration_dinov2.py b/src/transformers/models/dinov2/configuration_dinov2.py index 2df883de1699..bb9cd7820d10 100644 --- a/src/transformers/models/dinov2/configuration_dinov2.py +++ b/src/transformers/models/dinov2/configuration_dinov2.py @@ -170,3 +170,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["Dinov2Config", "Dinov2OnnxConfig"] diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index 160c5ae69f39..0f025d6884e8 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -858,3 +858,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions if output_attentions else None, ) + + +__all__ = ["Dinov2PreTrainedModel", "Dinov2Model", "Dinov2ForImageClassification", "Dinov2Backbone"] diff --git a/src/transformers/models/distilbert/__init__.py b/src/transformers/models/distilbert/__init__.py index 7d6586bfa508..1cd798618905 100644 --- a/src/transformers/models/distilbert/__init__.py +++ b/src/transformers/models/distilbert/__init__.py @@ -11,150 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
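One invariant worth keeping in mind while reviewing: the names in each module's new `__all__` must match what the deleted `_import_structure` dict used to list, or symbols silently appear in or vanish from the package namespace. A spot check against the dinat hunk above (illustrative, and it imports the torch-backed modeling file, so torch must be installed):

    # Illustrative: modeling_dinat's new __all__ carries exactly the names the
    # old hand-written _import_structure advertised for it.
    from transformers.models.dinat import modeling_dinat

    expected = {"DinatBackbone", "DinatForImageClassification", "DinatModel", "DinatPreTrainedModel"}
    assert set(modeling_dinat.__all__) == expected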
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_distilbert": [ - "DistilBertConfig", - "DistilBertOnnxConfig", - ], - "tokenization_distilbert": ["DistilBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_distilbert"] = [ - "DistilBertForMaskedLM", - "DistilBertForMultipleChoice", - "DistilBertForQuestionAnswering", - "DistilBertForSequenceClassification", - "DistilBertForTokenClassification", - "DistilBertModel", - "DistilBertPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_distilbert"] = [ - "TFDistilBertForMaskedLM", - "TFDistilBertForMultipleChoice", - "TFDistilBertForQuestionAnswering", - "TFDistilBertForSequenceClassification", - "TFDistilBertForTokenClassification", - "TFDistilBertMainLayer", - "TFDistilBertModel", - "TFDistilBertPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_distilbert"] = [ - "FlaxDistilBertForMaskedLM", - "FlaxDistilBertForMultipleChoice", - "FlaxDistilBertForQuestionAnswering", - "FlaxDistilBertForSequenceClassification", - "FlaxDistilBertForTokenClassification", - "FlaxDistilBertModel", - "FlaxDistilBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_distilbert import ( - DistilBertConfig, - DistilBertOnnxConfig, - ) - from .tokenization_distilbert import DistilBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_distilbert_fast import DistilBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_distilbert import ( - DistilBertForMaskedLM, - DistilBertForMultipleChoice, - DistilBertForQuestionAnswering, - DistilBertForSequenceClassification, - DistilBertForTokenClassification, - DistilBertModel, - DistilBertPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_distilbert import ( - TFDistilBertForMaskedLM, - TFDistilBertForMultipleChoice, - TFDistilBertForQuestionAnswering, - TFDistilBertForSequenceClassification, - TFDistilBertForTokenClassification, - TFDistilBertMainLayer, - TFDistilBertModel, - TFDistilBertPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_distilbert import ( - FlaxDistilBertForMaskedLM, - FlaxDistilBertForMultipleChoice, - FlaxDistilBertForQuestionAnswering, - FlaxDistilBertForSequenceClassification, - FlaxDistilBertForTokenClassification, - 
FlaxDistilBertModel, - FlaxDistilBertPreTrainedModel, - ) - + from .configuration_distilbert import * + from .modeling_distilbert import * + from .modeling_flax_distilbert import * + from .modeling_tf_distilbert import * + from .tokenization_distilbert import * + from .tokenization_distilbert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/distilbert/configuration_distilbert.py b/src/transformers/models/distilbert/configuration_distilbert.py index a2ce1a2419db..9a28c8e5d03d 100644 --- a/src/transformers/models/distilbert/configuration_distilbert.py +++ b/src/transformers/models/distilbert/configuration_distilbert.py @@ -136,3 +136,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["DistilBertConfig", "DistilBertOnnxConfig"] diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index e80e3c41d22c..b2a6cf682d35 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -492,6 +492,8 @@ def forward( # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # + + class DistilBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1271,3 +1273,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "DistilBertPreTrainedModel", + "DistilBertModel", + "DistilBertForMaskedLM", + "DistilBertForSequenceClassification", + "DistilBertForQuestionAnswering", + "DistilBertForTokenClassification", + "DistilBertForMultipleChoice", +] diff --git a/src/transformers/models/distilbert/modeling_flax_distilbert.py b/src/transformers/models/distilbert/modeling_flax_distilbert.py index 0cb7cdb033c1..259c9eb69dbd 100644 --- a/src/transformers/models/distilbert/modeling_flax_distilbert.py +++ b/src/transformers/models/distilbert/modeling_flax_distilbert.py @@ -893,3 +893,13 @@ class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel): FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxDistilBertPreTrainedModel", + "FlaxDistilBertModel", + "FlaxDistilBertForMaskedLM", + "FlaxDistilBertForSequenceClassification", + "FlaxDistilBertForMultipleChoice", + "FlaxDistilBertForTokenClassification", + "FlaxDistilBertForQuestionAnswering", +] diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index 87dab93ca16f..9d0a0b8ba97c 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -477,6 +477,8 @@ def build(self, input_shape=None): # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # + + class TFDistilBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1133,3 +1135,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.dim]) + + +__all__ = [ + 
"TFDistilBertPreTrainedModel", + "TFDistilBertModel", + "TFDistilBertForMaskedLM", + "TFDistilBertForSequenceClassification", + "TFDistilBertForTokenClassification", + "TFDistilBertForMultipleChoice", + "TFDistilBertForQuestionAnswering", + "TFDistilBertMainLayer", +] diff --git a/src/transformers/models/distilbert/tokenization_distilbert.py b/src/transformers/models/distilbert/tokenization_distilbert.py index 87b1eb192e4a..f652794acd21 100644 --- a/src/transformers/models/distilbert/tokenization_distilbert.py +++ b/src/transformers/models/distilbert/tokenization_distilbert.py @@ -512,3 +512,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["DistilBertTokenizer"] diff --git a/src/transformers/models/distilbert/tokenization_distilbert_fast.py b/src/transformers/models/distilbert/tokenization_distilbert_fast.py index f1d69a27d67c..d3829763d5e7 100644 --- a/src/transformers/models/distilbert/tokenization_distilbert_fast.py +++ b/src/transformers/models/distilbert/tokenization_distilbert_fast.py @@ -174,3 +174,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["DistilBertTokenizerFast"] diff --git a/src/transformers/models/donut/__init__.py b/src/transformers/models/donut/__init__.py index f6f38609e6ff..9553a34e8ed3 100644 --- a/src/transformers/models/donut/__init__.py +++ b/src/transformers/models/donut/__init__.py @@ -13,60 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_donut_swin": ["DonutSwinConfig"], - "processing_donut": ["DonutProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_donut_swin"] = [ - "DonutSwinModel", - "DonutSwinPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"] - _import_structure["image_processing_donut"] = ["DonutImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_donut_swin import DonutSwinConfig - from .processing_donut import DonutProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_donut_swin import ( - DonutSwinModel, - DonutSwinPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_donut import DonutFeatureExtractor - from .image_processing_donut import DonutImageProcessor - + from .configuration_donut_swin import * + from .feature_extraction_donut import * + from .image_processing_donut import * + from .modeling_donut_swin import * + from .processing_donut import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/donut/configuration_donut_swin.py b/src/transformers/models/donut/configuration_donut_swin.py index b9f9fae39cef..9aac07dace76 100644 --- a/src/transformers/models/donut/configuration_donut_swin.py +++ b/src/transformers/models/donut/configuration_donut_swin.py @@ -130,3 +130,6 @@ def __init__( # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + + +__all__ = ["DonutSwinConfig"] diff --git a/src/transformers/models/donut/feature_extraction_donut.py b/src/transformers/models/donut/feature_extraction_donut.py index e6ca078c0e8a..3077285f62e0 100644 --- a/src/transformers/models/donut/feature_extraction_donut.py +++ b/src/transformers/models/donut/feature_extraction_donut.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_donut import DonutImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class DonutFeatureExtractor(DonutImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["DonutFeatureExtractor"] diff --git a/src/transformers/models/donut/image_processing_donut.py b/src/transformers/models/donut/image_processing_donut.py index edb0629d44bd..efc33621f300 100644 --- a/src/transformers/models/donut/image_processing_donut.py +++ b/src/transformers/models/donut/image_processing_donut.py @@ -40,7 +40,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging -from ...utils.import_utils import is_vision_available +from ...utils.import_utils import export, is_vision_available logger = logging.get_logger(__name__) @@ -50,6 +50,7 @@ import PIL +@export(backends=("vision",)) class DonutImageProcessor(BaseImageProcessor): r""" Constructs a Donut image processor. @@ -457,3 +458,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["DonutImageProcessor"] diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py index 8d639131b841..8fe119ab0142 100644 --- a/src/transformers/models/donut/modeling_donut_swin.py +++ b/src/transformers/models/donut/modeling_donut_swin.py @@ -1005,3 +1005,6 @@ def forward( attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) + + +__all__ = ["DonutSwinPreTrainedModel", "DonutSwinModel"] diff --git a/src/transformers/models/donut/processing_donut.py b/src/transformers/models/donut/processing_donut.py index daf6e7d1dfe4..03c773404424 100644 --- a/src/transformers/models/donut/processing_donut.py +++ b/src/transformers/models/donut/processing_donut.py @@ -195,3 +195,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["DonutProcessor"] diff --git a/src/transformers/models/dpr/__init__.py b/src/transformers/models/dpr/__init__.py index ef4bccee54d2..9445087332ef 100644 --- a/src/transformers/models/dpr/__init__.py +++ b/src/transformers/models/dpr/__init__.py @@ -11,126 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_dpr": ["DPRConfig"], - "tokenization_dpr": [ - "DPRContextEncoderTokenizer", - "DPRQuestionEncoderTokenizer", - "DPRReaderOutput", - "DPRReaderTokenizer", - ], -} - - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_dpr_fast"] = [ - "DPRContextEncoderTokenizerFast", - "DPRQuestionEncoderTokenizerFast", - "DPRReaderTokenizerFast", - ] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dpr"] = [ - "DPRContextEncoder", - "DPRPretrainedContextEncoder", - "DPRPreTrainedModel", - "DPRPretrainedQuestionEncoder", - "DPRPretrainedReader", - "DPRQuestionEncoder", - "DPRReader", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_dpr"] = [ - "TFDPRContextEncoder", - "TFDPRPretrainedContextEncoder", - "TFDPRPretrainedQuestionEncoder", - "TFDPRPretrainedReader", - "TFDPRQuestionEncoder", - "TFDPRReader", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_dpr import DPRConfig - from .tokenization_dpr import ( - DPRContextEncoderTokenizer, - DPRQuestionEncoderTokenizer, - DPRReaderOutput, - DPRReaderTokenizer, - ) - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_dpr_fast import ( - DPRContextEncoderTokenizerFast, - DPRQuestionEncoderTokenizerFast, - DPRReaderTokenizerFast, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dpr import ( - DPRContextEncoder, - DPRPretrainedContextEncoder, - DPRPreTrainedModel, - DPRPretrainedQuestionEncoder, - DPRPretrainedReader, - DPRQuestionEncoder, - DPRReader, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_dpr import ( - TFDPRContextEncoder, - TFDPRPretrainedContextEncoder, - TFDPRPretrainedQuestionEncoder, - TFDPRPretrainedReader, - TFDPRQuestionEncoder, - TFDPRReader, - ) - + from .configuration_dpr import * + from .modeling_dpr import * + from .modeling_tf_dpr import * + from .tokenization_dpr import * + from .tokenization_dpr_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dpr/configuration_dpr.py b/src/transformers/models/dpr/configuration_dpr.py index b22da23ca4cb..7e4b97c97a4f 100644 --- a/src/transformers/models/dpr/configuration_dpr.py +++ b/src/transformers/models/dpr/configuration_dpr.py @@ -126,3 +126,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.projection_dim = projection_dim 
self.position_embedding_type = position_embedding_type + + +__all__ = ["DPRConfig"] diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 7ba63f134ccc..2fd7fbcd3a0f 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -655,3 +655,15 @@ def forward( output_hidden_states=output_hidden_states, return_dict=return_dict, ) + + +__all__ = [ + "DPRPreTrainedModel", + "DPRPretrainedContextEncoder", + "DPRPretrainedQuestionEncoder", + "DPRPretrainedReader", + "DPRContextEncoder", + "DPRQuestionEncoder", + "DPRReader", + "DPRReaderOutput", +] diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index 92a0e54cbba5..9721d0091e37 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -788,3 +788,13 @@ def build(self, input_shape=None): if getattr(self, "span_predictor", None) is not None: with tf.name_scope(self.span_predictor.name): self.span_predictor.build(None) + + +__all__ = [ + "TFDPRPretrainedContextEncoder", + "TFDPRPretrainedQuestionEncoder", + "TFDPRPretrainedReader", + "TFDPRContextEncoder", + "TFDPRQuestionEncoder", + "TFDPRReader", +] diff --git a/src/transformers/models/dpr/tokenization_dpr.py b/src/transformers/models/dpr/tokenization_dpr.py index 45ce73425f23..2660ae3ca843 100644 --- a/src/transformers/models/dpr/tokenization_dpr.py +++ b/src/transformers/models/dpr/tokenization_dpr.py @@ -316,3 +316,10 @@ class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] + + +__all__ = [ + "DPRContextEncoderTokenizer", + "DPRQuestionEncoderTokenizer", + "DPRReaderTokenizer", +] diff --git a/src/transformers/models/dpr/tokenization_dpr_fast.py b/src/transformers/models/dpr/tokenization_dpr_fast.py index 69ac58a77dc1..3b42a933541e 100644 --- a/src/transformers/models/dpr/tokenization_dpr_fast.py +++ b/src/transformers/models/dpr/tokenization_dpr_fast.py @@ -316,3 +316,10 @@ class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = DPRReaderTokenizer + + +__all__ = [ + "DPRContextEncoderTokenizerFast", + "DPRQuestionEncoderTokenizerFast", + "DPRReaderTokenizerFast", +] diff --git a/src/transformers/models/dpt/__init__.py b/src/transformers/models/dpt/__init__.py index ef8999d5efba..4c9a04f157c7 100644 --- a/src/transformers/models/dpt/__init__.py +++ b/src/transformers/models/dpt/__init__.py @@ -13,62 +13,17 @@ # limitations under the License. 
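# NOTE (illustrative sketch, not part of the patch): every rewritten
# __init__.py in this diff reduces to the same three-line pattern, so the
# runtime contract is worth spelling out once. `Lazy` below is a hypothetical
# stand-in for _LazyModule; the real class lives under src/transformers/utils
# and consumes the mapping produced by define_import_structure(_file).
import importlib
import types


class Lazy(types.ModuleType):
    def __init__(self, name, file, structure):
        super().__init__(name)
        self.__file__ = file
        self._structure = structure  # e.g. {"configuration_dpt": {"DPTConfig"}}

    def __getattr__(self, attr):
        # Resolve the submodule that owns `attr` on first access only.
        for submodule, symbols in self._structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                value = getattr(module, attr)
                setattr(self, attr, value)  # cache so __getattr__ runs once
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")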
from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_dpt": ["DPTConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"] - _import_structure["image_processing_dpt"] = ["DPTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dpt"] = [ - "DPTForDepthEstimation", - "DPTForSemanticSegmentation", - "DPTModel", - "DPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_dpt import DPTConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_dpt import DPTFeatureExtractor - from .image_processing_dpt import DPTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dpt import ( - DPTForDepthEstimation, - DPTForSemanticSegmentation, - DPTModel, - DPTPreTrainedModel, - ) - - + from .configuration_dpt import * + from .feature_extraction_dpt import * + from .image_processing_dpt import * + from .modeling_dpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 869f384f5698..516f8f43f0d2 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -281,3 +281,6 @@ def to_dict(self): output["model_type"] = self.__class__.model_type return output + + +__all__ = ["DPTConfig"] diff --git a/src/transformers/models/dpt/feature_extraction_dpt.py b/src/transformers/models/dpt/feature_extraction_dpt.py index d375d8229f5e..e6989d7f0c4a 100644 --- a/src/transformers/models/dpt/feature_extraction_dpt.py +++ b/src/transformers/models/dpt/feature_extraction_dpt.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_dpt import DPTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class DPTFeatureExtractor(DPTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["DPTFeatureExtractor"] diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py index a263d8a51f42..481a8b32d6ac 100644 --- a/src/transformers/models/dpt/image_processing_dpt.py +++ b/src/transformers/models/dpt/image_processing_dpt.py @@ -38,6 +38,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_torch_available(): @@ -92,6 +93,7 @@ 
def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None): return (new_height, new_width) +@export(backends=("vision",)) class DPTImageProcessor(BaseImageProcessor): r""" Constructs a DPT image processor. @@ -461,3 +463,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["DPTImageProcessor"] diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index 1587493643e9..487608a6b7c3 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -1372,3 +1372,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["DPTPreTrainedModel", "DPTModel", "DPTForDepthEstimation", "DPTForSemanticSegmentation"] diff --git a/src/transformers/models/efficientnet/__init__.py b/src/transformers/models/efficientnet/__init__.py index 28cb70490d96..afdf99ee351a 100644 --- a/src/transformers/models/efficientnet/__init__.py +++ b/src/transformers/models/efficientnet/__init__.py @@ -1,80 +1,18 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
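# NOTE (sketch): the @export(backends=("vision",)) marker applied to the
# feature extractors and image processors in this diff only needs to record
# its requirements on the decorated object; the scanner behind
# define_import_structure reads them back when building the structure. A
# minimal decorator consistent with that usage (the attribute name below is
# an illustrative assumption, not necessarily what utils/import_utils.py uses):
def export(*, backends=()):
    def inner(obj):
        obj.__backends = backends  # consumed by the import-structure scanner
        return obj
    return inner


@export(backends=("vision",))
class ExampleImageProcessor:  # hypothetical class, for illustration only
    pass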
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_efficientnet": [ - "EfficientNetConfig", - "EfficientNetOnnxConfig", - ] -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_efficientnet"] = [ - "EfficientNetForImageClassification", - "EfficientNetModel", - "EfficientNetPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_efficientnet import ( - EfficientNetConfig, - EfficientNetOnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_efficientnet import EfficientNetImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_efficientnet import ( - EfficientNetForImageClassification, - EfficientNetModel, - EfficientNetPreTrainedModel, - ) - + from .image_processing_efficientnet import * + from .modeling_efficientnet import * + from .configuration_efficientnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/efficientnet/configuration_efficientnet.py b/src/transformers/models/efficientnet/configuration_efficientnet.py index 4c7feb377fb9..ef25447d6aef 100644 --- a/src/transformers/models/efficientnet/configuration_efficientnet.py +++ b/src/transformers/models/efficientnet/configuration_efficientnet.py @@ -164,3 +164,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-5 + + +__all__ = ["EfficientNetConfig", "EfficientNetOnnxConfig"] diff --git a/src/transformers/models/efficientnet/image_processing_efficientnet.py b/src/transformers/models/efficientnet/image_processing_efficientnet.py index 3383fff9b0e8..ac56ab9de251 100644 --- a/src/transformers/models/efficientnet/image_processing_efficientnet.py +++ b/src/transformers/models/efficientnet/image_processing_efficientnet.py @@ -34,6 +34,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -43,6 +44,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class EfficientNetImageProcessor(BaseImageProcessor): r""" Constructs a EfficientNet image processor. 
@@ -364,3 +366,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["EfficientNetImageProcessor"] diff --git a/src/transformers/models/efficientnet/modeling_efficientnet.py b/src/transformers/models/efficientnet/modeling_efficientnet.py index 057cd42f2a37..ab683d951440 100644 --- a/src/transformers/models/efficientnet/modeling_efficientnet.py +++ b/src/transformers/models/efficientnet/modeling_efficientnet.py @@ -642,3 +642,6 @@ def forward( logits=logits, hidden_states=outputs.hidden_states, ) + + +__all__ = ["EfficientNetPreTrainedModel", "EfficientNetModel", "EfficientNetForImageClassification"] diff --git a/src/transformers/models/electra/__init__.py b/src/transformers/models/electra/__init__.py index b79f2410bf35..56045f7cb84a 100644 --- a/src/transformers/models/electra/__init__.py +++ b/src/transformers/models/electra/__init__.py @@ -11,154 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_electra": ["ElectraConfig", "ElectraOnnxConfig"], - "tokenization_electra": ["ElectraTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_electra"] = [ - "ElectraForCausalLM", - "ElectraForMaskedLM", - "ElectraForMultipleChoice", - "ElectraForPreTraining", - "ElectraForQuestionAnswering", - "ElectraForSequenceClassification", - "ElectraForTokenClassification", - "ElectraModel", - "ElectraPreTrainedModel", - "load_tf_weights_in_electra", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_electra"] = [ - "TFElectraForMaskedLM", - "TFElectraForMultipleChoice", - "TFElectraForPreTraining", - "TFElectraForQuestionAnswering", - "TFElectraForSequenceClassification", - "TFElectraForTokenClassification", - "TFElectraModel", - "TFElectraPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_electra"] = [ - "FlaxElectraForCausalLM", - "FlaxElectraForMaskedLM", - "FlaxElectraForMultipleChoice", - "FlaxElectraForPreTraining", - "FlaxElectraForQuestionAnswering", - "FlaxElectraForSequenceClassification", - "FlaxElectraForTokenClassification", - "FlaxElectraModel", - "FlaxElectraPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_electra import ElectraConfig, ElectraOnnxConfig - from .tokenization_electra import ElectraTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_electra_fast import ElectraTokenizerFast - - try: - if not is_torch_available(): - raise 
OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_electra import ( - ElectraForCausalLM, - ElectraForMaskedLM, - ElectraForMultipleChoice, - ElectraForPreTraining, - ElectraForQuestionAnswering, - ElectraForSequenceClassification, - ElectraForTokenClassification, - ElectraModel, - ElectraPreTrainedModel, - load_tf_weights_in_electra, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_electra import ( - TFElectraForMaskedLM, - TFElectraForMultipleChoice, - TFElectraForPreTraining, - TFElectraForQuestionAnswering, - TFElectraForSequenceClassification, - TFElectraForTokenClassification, - TFElectraModel, - TFElectraPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_electra import ( - FlaxElectraForCausalLM, - FlaxElectraForMaskedLM, - FlaxElectraForMultipleChoice, - FlaxElectraForPreTraining, - FlaxElectraForQuestionAnswering, - FlaxElectraForSequenceClassification, - FlaxElectraForTokenClassification, - FlaxElectraModel, - FlaxElectraPreTrainedModel, - ) - + from .configuration_electra import * + from .modeling_electra import * + from .modeling_flax_electra import * + from .modeling_tf_electra import * + from .tokenization_electra import * + from .tokenization_electra_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/electra/configuration_electra.py b/src/transformers/models/electra/configuration_electra.py index 17be728ed65b..20b242c0f8d6 100644 --- a/src/transformers/models/electra/configuration_electra.py +++ b/src/transformers/models/electra/configuration_electra.py @@ -182,3 +182,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["ElectraConfig", "ElectraOnnxConfig"] diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index dd017170bef9..67ad0a1e7d15 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -1681,3 +1681,17 @@ def _reorder_cache(self, past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "load_tf_weights_in_electra", + "ElectraPreTrainedModel", + "ElectraModel", + "ElectraForSequenceClassification", + "ElectraForPreTraining", + "ElectraForMaskedLM", + "ElectraForTokenClassification", + "ElectraForQuestionAnswering", + "ElectraForMultipleChoice", + "ElectraForCausalLM", +] diff --git a/src/transformers/models/electra/modeling_flax_electra.py b/src/transformers/models/electra/modeling_flax_electra.py index 64d49eb17a46..3c01cabd51c3 100644 --- a/src/transformers/models/electra/modeling_flax_electra.py +++ b/src/transformers/models/electra/modeling_flax_electra.py @@ -1599,3 +1599,15 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxElectraPreTrainedModel", + "FlaxElectraModel", + "FlaxElectraForMaskedLM", + 
"FlaxElectraForPreTraining", + "FlaxElectraForTokenClassification", + "FlaxElectraForMultipleChoice", + "FlaxElectraForQuestionAnswering", + "FlaxElectraForSequenceClassification", + "FlaxElectraForCausalLM", +] diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index a289bb9728fd..726a79b654ab 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -1762,3 +1762,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFElectraPreTrainedModel", + "TFElectraModel", + "TFElectraForPreTraining", + "TFElectraForMaskedLM", + "TFElectraForSequenceClassification", + "TFElectraForMultipleChoice", + "TFElectraForTokenClassification", + "TFElectraForQuestionAnswering", + "TFElectraMainLayer", +] diff --git a/src/transformers/models/electra/tokenization_electra.py b/src/transformers/models/electra/tokenization_electra.py index 9ecbce63f50b..c29e4423b591 100644 --- a/src/transformers/models/electra/tokenization_electra.py +++ b/src/transformers/models/electra/tokenization_electra.py @@ -501,3 +501,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["ElectraTokenizer"] diff --git a/src/transformers/models/electra/tokenization_electra_fast.py b/src/transformers/models/electra/tokenization_electra_fast.py index 7b9d6a36cb92..34ea4339b938 100644 --- a/src/transformers/models/electra/tokenization_electra_fast.py +++ b/src/transformers/models/electra/tokenization_electra_fast.py @@ -167,3 +167,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["ElectraTokenizerFast"] diff --git a/src/transformers/models/encodec/__init__.py b/src/transformers/models/encodec/__init__.py index d67075e5560c..71af4d35f743 100644 --- a/src/transformers/models/encodec/__init__.py +++ b/src/transformers/models/encodec/__init__.py @@ -13,47 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_encodec": ["EncodecConfig"], - "feature_extraction_encodec": ["EncodecFeatureExtractor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_encodec"] = [ - "EncodecModel", - "EncodecPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_encodec import ( - EncodecConfig, - ) - from .feature_extraction_encodec import EncodecFeatureExtractor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_encodec import ( - EncodecModel, - EncodecPreTrainedModel, - ) - + from .configuration_encodec import * + from .feature_extraction_encodec import * + from .modeling_encodec import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/encodec/configuration_encodec.py b/src/transformers/models/encodec/configuration_encodec.py index bc10e8ffc3d5..77fd67727dc3 100644 --- a/src/transformers/models/encodec/configuration_encodec.py +++ b/src/transformers/models/encodec/configuration_encodec.py @@ -187,3 +187,6 @@ def frame_rate(self) -> int: @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10)) + + +__all__ = ["EncodecConfig"] diff --git a/src/transformers/models/encodec/feature_extraction_encodec.py b/src/transformers/models/encodec/feature_extraction_encodec.py index 6f7536a52e9f..9bed59de45d8 100644 --- a/src/transformers/models/encodec/feature_extraction_encodec.py +++ b/src/transformers/models/encodec/feature_extraction_encodec.py @@ -204,3 +204,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["EncodecFeatureExtractor"] diff --git a/src/transformers/models/encodec/modeling_encodec.py b/src/transformers/models/encodec/modeling_encodec.py index f325a6adbe6c..869c6840fbe8 100644 --- a/src/transformers/models/encodec/modeling_encodec.py +++ b/src/transformers/models/encodec/modeling_encodec.py @@ -805,3 +805,6 @@ def forward( return (audio_codes, audio_values) return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values) + + +__all__ = ["EncodecPreTrainedModel", "EncodecModel"] diff --git a/src/transformers/models/encoder_decoder/__init__.py b/src/transformers/models/encoder_decoder/__init__.py index ba71f1f7c7a9..555f6523cf5f 100644 --- a/src/transformers/models/encoder_decoder/__init__.py +++ b/src/transformers/models/encoder_decoder/__init__.py @@ -11,72 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: - from .configuration_encoder_decoder import EncoderDecoderConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_encoder_decoder import EncoderDecoderModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_encoder_decoder import TFEncoderDecoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel - + from .configuration_encoder_decoder import * + from .modeling_encoder_decoder import * + from .modeling_flax_encoder_decoder import * + from .modeling_tf_encoder_decoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py index ab5d49b32fea..0bcf178067b0 100644 --- a/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py @@ -106,3 +106,6 @@ def from_encoder_decoder_configs( decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) + + +__all__ = ["EncoderDecoderConfig"] diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index db65f6e5250f..84130a1b6e07 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -691,3 +691,6 @@ def resize_token_embeddings(self, *args, **kwargs): def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past_key_values, beam_idx) + + +__all__ = ["EncoderDecoderModel"] diff --git a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py index 24b053969c7e..bdc589484cda 100644 --- 
a/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py @@ -896,3 +896,6 @@ def from_encoder_decoder_pretrained( model.params["decoder"] = decoder.params return model + + +__all__ = ["FlaxEncoderDecoderModel"] diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index 85802b77f383..66009fc3ef06 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -660,3 +660,6 @@ def build(self, input_shape=None): if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) + + +__all__ = ["TFEncoderDecoderModel"] diff --git a/src/transformers/models/ernie/__init__.py b/src/transformers/models/ernie/__init__.py index ddd3b30365d8..bdcdfb4d10b6 100644 --- a/src/transformers/models/ernie/__init__.py +++ b/src/transformers/models/ernie/__init__.py @@ -11,58 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available - - -_import_structure = { - "configuration_ernie": ["ErnieConfig", "ErnieOnnxConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_ernie"] = [ - "ErnieForCausalLM", - "ErnieForMaskedLM", - "ErnieForMultipleChoice", - "ErnieForNextSentencePrediction", - "ErnieForPreTraining", - "ErnieForQuestionAnswering", - "ErnieForSequenceClassification", - "ErnieForTokenClassification", - "ErnieModel", - "ErniePreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_ernie import ErnieConfig, ErnieOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_ernie import ( - ErnieForCausalLM, - ErnieForMaskedLM, - ErnieForMultipleChoice, - ErnieForNextSentencePrediction, - ErnieForPreTraining, - ErnieForQuestionAnswering, - ErnieForSequenceClassification, - ErnieForTokenClassification, - ErnieModel, - ErniePreTrainedModel, - ) - + from .configuration_ernie import * + from .modeling_ernie import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/ernie/configuration_ernie.py b/src/transformers/models/ernie/configuration_ernie.py index 808a0c27220c..655e40e163b5 100644 --- a/src/transformers/models/ernie/configuration_ernie.py +++ b/src/transformers/models/ernie/configuration_ernie.py @@ -158,3 +158,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("task_type_ids", dynamic_axis), ] ) + + +__all__ = ["ErnieConfig", "ErnieOnnxConfig"] diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py index 6a0a26a5cbe5..baf539203e2c 100644 --- 
a/src/transformers/models/ernie/modeling_ernie.py +++ b/src/transformers/models/ernie/modeling_ernie.py @@ -1827,3 +1827,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "ErniePreTrainedModel", + "ErnieModel", + "ErnieForPreTraining", + "ErnieForCausalLM", + "ErnieForMaskedLM", + "ErnieForNextSentencePrediction", + "ErnieForSequenceClassification", + "ErnieForMultipleChoice", + "ErnieForTokenClassification", + "ErnieForQuestionAnswering", +] diff --git a/src/transformers/models/esm/__init__.py b/src/transformers/models/esm/__init__.py index a764bedc3fad..d71136ccf095 100644 --- a/src/transformers/models/esm/__init__.py +++ b/src/transformers/models/esm/__init__.py @@ -13,78 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_esm": ["EsmConfig"], - "tokenization_esm": ["EsmTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_esm"] = [ - "EsmForMaskedLM", - "EsmForSequenceClassification", - "EsmForTokenClassification", - "EsmModel", - "EsmPreTrainedModel", - ] - _import_structure["modeling_esmfold"] = ["EsmForProteinFolding", "EsmFoldPreTrainedModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_esm"] = [ - "TFEsmForMaskedLM", - "TFEsmForSequenceClassification", - "TFEsmForTokenClassification", - "TFEsmModel", - "TFEsmPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_esm import EsmConfig - from .tokenization_esm import EsmTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_esm import ( - EsmForMaskedLM, - EsmForSequenceClassification, - EsmForTokenClassification, - EsmModel, - EsmPreTrainedModel, - ) - from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_esm import ( - TFEsmForMaskedLM, - TFEsmForSequenceClassification, - TFEsmForTokenClassification, - TFEsmModel, - TFEsmPreTrainedModel, - ) - - + from .configuration_esm import * + from .modeling_esm import * + from .modeling_esmfold import * + from .modeling_tf_esm import * + from .tokenization_esm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/esm/configuration_esm.py b/src/transformers/models/esm/configuration_esm.py index 9634a20015f2..8df71cf8d9d5 100644 --- a/src/transformers/models/esm/configuration_esm.py +++ b/src/transformers/models/esm/configuration_esm.py @@ -357,3 +357,6 @@ def get_default_vocab_list(): "", "", ) + + +__all__ = ["EsmConfig"] diff --git a/src/transformers/models/esm/modeling_esm.py b/src/transformers/models/esm/modeling_esm.py index 5df5435bb122..9c513a194ed3 100755 --- a/src/transformers/models/esm/modeling_esm.py +++ 
b/src/transformers/models/esm/modeling_esm.py @@ -1260,3 +1260,12 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "EsmPreTrainedModel", + "EsmModel", + "EsmForMaskedLM", + "EsmForSequenceClassification", + "EsmForTokenClassification", +] diff --git a/src/transformers/models/esm/modeling_esmfold.py b/src/transformers/models/esm/modeling_esmfold.py index 3aaf81196072..158c8605496a 100644 --- a/src/transformers/models/esm/modeling_esmfold.py +++ b/src/transformers/models/esm/modeling_esmfold.py @@ -2320,3 +2320,6 @@ def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]: """Returns the pdb (file) string from the model given an input sequence.""" output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output) + + +__all__ = ["EsmFoldPreTrainedModel", "EsmForProteinFolding"] diff --git a/src/transformers/models/esm/modeling_tf_esm.py b/src/transformers/models/esm/modeling_tf_esm.py index 0e5cf3d8f61f..2ca6c626ce31 100644 --- a/src/transformers/models/esm/modeling_tf_esm.py +++ b/src/transformers/models/esm/modeling_tf_esm.py @@ -1564,3 +1564,13 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = tf.cast(input_ids != padding_idx, tf.int64) incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + padding_idx + + +__all__ = [ + "TFEsmPreTrainedModel", + "TFEsmModel", + "TFEsmForMaskedLM", + "TFEsmForSequenceClassification", + "TFEsmForTokenClassification", + "TFEsmMainLayer", +] diff --git a/src/transformers/models/esm/tokenization_esm.py b/src/transformers/models/esm/tokenization_esm.py index fbb759c1d171..4bc433e350e1 100644 --- a/src/transformers/models/esm/tokenization_esm.py +++ b/src/transformers/models/esm/tokenization_esm.py @@ -142,3 +142,6 @@ def save_vocabulary(self, save_directory, filename_prefix): @property def vocab_size(self) -> int: return len(self.all_tokens) + + +__all__ = ["EsmTokenizer"] diff --git a/src/transformers/models/falcon/__init__.py b/src/transformers/models/falcon/__init__.py index 62c1c9262b70..5e9b72a829df 100644 --- a/src/transformers/models/falcon/__init__.py +++ b/src/transformers/models/falcon/__init__.py @@ -14,53 +14,15 @@ # limitations under the License. 
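# NOTE (usage sketch): caller-facing imports are unchanged by this refactor;
# what changes is how little gets imported eagerly. With the lazy init in
# place, grabbing the config should not pull in torch or modeling_falcon:
from transformers.models.falcon import FalconConfig

config = FalconConfig()   # triggers only configuration_falcon
print(config.model_type)  # -> "falcon"
# FalconModel would be resolved (and torch imported) on first access instead.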
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_falcon": ["FalconConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_falcon"] = [ - "FalconForCausalLM", - "FalconModel", - "FalconPreTrainedModel", - "FalconForSequenceClassification", - "FalconForTokenClassification", - "FalconForQuestionAnswering", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_falcon import FalconConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_falcon import ( - FalconForCausalLM, - FalconForQuestionAnswering, - FalconForSequenceClassification, - FalconForTokenClassification, - FalconModel, - FalconPreTrainedModel, - ) - - + from .configuration_falcon import * + from .modeling_falcon import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/falcon/configuration_falcon.py b/src/transformers/models/falcon/configuration_falcon.py index 0dd61047dd27..3a9ebe8ab818 100644 --- a/src/transformers/models/falcon/configuration_falcon.py +++ b/src/transformers/models/falcon/configuration_falcon.py @@ -201,3 +201,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["FalconConfig"] diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index a340689a7c3f..de7ba0c3ae7e 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -1707,3 +1707,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "FalconPreTrainedModel", + "FalconModel", + "FalconForCausalLM", + "FalconForSequenceClassification", + "FalconForTokenClassification", + "FalconForQuestionAnswering", +] diff --git a/src/transformers/models/fastspeech2_conformer/__init__.py b/src/transformers/models/fastspeech2_conformer/__init__.py index 2014f74be1f7..db248883445f 100644 --- a/src/transformers/models/fastspeech2_conformer/__init__.py +++ b/src/transformers/models/fastspeech2_conformer/__init__.py @@ -13,57 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_fastspeech2_conformer": [ - "FastSpeech2ConformerConfig", - "FastSpeech2ConformerHifiGanConfig", - "FastSpeech2ConformerWithHifiGanConfig", - ], - "tokenization_fastspeech2_conformer": ["FastSpeech2ConformerTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fastspeech2_conformer"] = [ - "FastSpeech2ConformerWithHifiGan", - "FastSpeech2ConformerHifiGan", - "FastSpeech2ConformerModel", - "FastSpeech2ConformerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_fastspeech2_conformer import ( - FastSpeech2ConformerConfig, - FastSpeech2ConformerHifiGanConfig, - FastSpeech2ConformerWithHifiGanConfig, - ) - from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fastspeech2_conformer import ( - FastSpeech2ConformerHifiGan, - FastSpeech2ConformerModel, - FastSpeech2ConformerPreTrainedModel, - FastSpeech2ConformerWithHifiGan, - ) - + from .configuration_fastspeech2_conformer import * + from .modeling_fastspeech2_conformer import * + from .tokenization_fastspeech2_conformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py index ade5b8b26675..eb2c3f5e81b2 100644 --- a/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +++ b/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py @@ -473,3 +473,6 @@ def __init__( self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config) super().__init__(**kwargs) + + +__all__ = ["FastSpeech2ConformerConfig", "FastSpeech2ConformerHifiGanConfig", "FastSpeech2ConformerWithHifiGanConfig"] diff --git a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py index e97e276b18f6..8708e53b19e6 100644 --- a/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +++ b/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py @@ -1679,3 +1679,11 @@ def forward( return model_outputs + (waveform,) return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs) + + +__all__ = [ + "FastSpeech2ConformerPreTrainedModel", + "FastSpeech2ConformerModel", + "FastSpeech2ConformerHifiGan", + "FastSpeech2ConformerWithHifiGan", +] diff --git a/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py b/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py index 65c081c4c143..faa1420d71c4 100644 --- a/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +++ 
b/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py @@ -183,3 +183,6 @@ def __setstate__(self, d): "You need to install g2p-en to use FastSpeech2ConformerTokenizer. " "See https://pypi.org/project/g2p-en/ for installation." ) + + +__all__ = ["FastSpeech2ConformerTokenizer"] diff --git a/src/transformers/models/flaubert/__init__.py b/src/transformers/models/flaubert/__init__.py index 94cf7b661396..60b8b72eee28 100644 --- a/src/transformers/models/flaubert/__init__.py +++ b/src/transformers/models/flaubert/__init__.py @@ -11,89 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_flaubert": ["FlaubertConfig", "FlaubertOnnxConfig"], - "tokenization_flaubert": ["FlaubertTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flaubert"] = [ - "FlaubertForMultipleChoice", - "FlaubertForQuestionAnswering", - "FlaubertForQuestionAnsweringSimple", - "FlaubertForSequenceClassification", - "FlaubertForTokenClassification", - "FlaubertModel", - "FlaubertWithLMHeadModel", - "FlaubertPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_flaubert"] = [ - "TFFlaubertForMultipleChoice", - "TFFlaubertForQuestionAnsweringSimple", - "TFFlaubertForSequenceClassification", - "TFFlaubertForTokenClassification", - "TFFlaubertModel", - "TFFlaubertPreTrainedModel", - "TFFlaubertWithLMHeadModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_flaubert import FlaubertConfig, FlaubertOnnxConfig - from .tokenization_flaubert import FlaubertTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flaubert import ( - FlaubertForMultipleChoice, - FlaubertForQuestionAnswering, - FlaubertForQuestionAnsweringSimple, - FlaubertForSequenceClassification, - FlaubertForTokenClassification, - FlaubertModel, - FlaubertPreTrainedModel, - FlaubertWithLMHeadModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_flaubert import ( - TFFlaubertForMultipleChoice, - TFFlaubertForQuestionAnsweringSimple, - TFFlaubertForSequenceClassification, - TFFlaubertForTokenClassification, - TFFlaubertModel, - TFFlaubertPreTrainedModel, - TFFlaubertWithLMHeadModel, - ) - + from .configuration_flaubert import * + from .modeling_flaubert import * + from .modeling_tf_flaubert import * + from .tokenization_flaubert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/flaubert/configuration_flaubert.py b/src/transformers/models/flaubert/configuration_flaubert.py index ae5e07245e9c..93e4645da565 100644 
--- a/src/transformers/models/flaubert/configuration_flaubert.py +++ b/src/transformers/models/flaubert/configuration_flaubert.py @@ -230,3 +230,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["FlaubertConfig", "FlaubertOnnxConfig"] diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 50c6f7ede222..8863a7e9bdc7 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -644,6 +644,7 @@ def forward( FLAUBERT_START_DOCSTRING, ) # Copied transformers.models.xlm.modeling_xlm.XLMWithLMHeadModel with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert + class FlaubertWithLMHeadModel(FlaubertPreTrainedModel): _tied_weights_keys = ["pred_layer.proj.weight"] @@ -742,6 +743,7 @@ def forward( FLAUBERT_START_DOCSTRING, ) # Copied transformers.models.xlm.modeling_xlm.XLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert + class FlaubertForSequenceClassification(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) @@ -1297,3 +1299,15 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "FlaubertPreTrainedModel", + "FlaubertModel", + "FlaubertWithLMHeadModel", + "FlaubertForSequenceClassification", + "FlaubertForTokenClassification", + "FlaubertForQuestionAnsweringSimple", + "FlaubertForQuestionAnswering", + "FlaubertForMultipleChoice", +] diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index 71e371da241a..1d40b9401e5b 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -1331,3 +1331,15 @@ def build(self, input_shape=None): if getattr(self, "logits_proj", None) is not None: with tf.name_scope(self.logits_proj.name): self.logits_proj.build([None, None, self.config.num_labels]) + + +__all__ = [ + "TFFlaubertPreTrainedModel", + "TFFlaubertModel", + "TFFlaubertWithLMHeadModel", + "TFFlaubertForSequenceClassification", + "TFFlaubertForQuestionAnsweringSimple", + "TFFlaubertForTokenClassification", + "TFFlaubertForMultipleChoice", + "TFFlaubertMainLayer", +] diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py index be9a4e79605f..ac9e5aa4336c 100644 --- a/src/transformers/models/flaubert/tokenization_flaubert.py +++ b/src/transformers/models/flaubert/tokenization_flaubert.py @@ -563,3 +563,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["FlaubertTokenizer"] diff --git a/src/transformers/models/flava/__init__.py b/src/transformers/models/flava/__init__.py index 9fbe54524a6d..75bf09f94628 100644 --- a/src/transformers/models/flava/__init__.py +++ b/src/transformers/models/flava/__init__.py @@ -13,81 +13,18 @@ # limitations under the License. 
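# NOTE (sketch): the whole scheme hinges on reading a submodule's public
# surface without importing it. An ast-based harvest like the one below
# (a hypothetical helper, not the patch's implementation) is enough to
# collect the __all__ lists added throughout this diff:
import ast


def harvest_all(path):
    tree = ast.parse(open(path, encoding="utf-8").read())
    for node in tree.body:
        if isinstance(node, ast.Assign):
            targets = [t.id for t in node.targets if isinstance(t, ast.Name)]
            if "__all__" in targets:
                return [ast.literal_eval(elt) for elt in node.value.elts]
    return []


# harvest_all("modeling_flava.py") -> ["FlavaPreTrainedModel", "FlavaImageModel", ...]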
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_flava": [ - "FlavaConfig", - "FlavaImageCodebookConfig", - "FlavaImageConfig", - "FlavaMultimodalConfig", - "FlavaTextConfig", - ], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"] - _import_structure["image_processing_flava"] = ["FlavaImageProcessor"] - _import_structure["processing_flava"] = ["FlavaProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flava"] = [ - "FlavaForPreTraining", - "FlavaImageCodebook", - "FlavaImageModel", - "FlavaModel", - "FlavaMultimodalModel", - "FlavaPreTrainedModel", - "FlavaTextModel", - ] - if TYPE_CHECKING: - from .configuration_flava import ( - FlavaConfig, - FlavaImageCodebookConfig, - FlavaImageConfig, - FlavaMultimodalConfig, - FlavaTextConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_flava import FlavaFeatureExtractor - from .image_processing_flava import FlavaImageProcessor - from .processing_flava import FlavaProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flava import ( - FlavaForPreTraining, - FlavaImageCodebook, - FlavaImageModel, - FlavaModel, - FlavaMultimodalModel, - FlavaPreTrainedModel, - FlavaTextModel, - ) - + from .configuration_flava import * + from .feature_extraction_flava import * + from .image_processing_flava import * + from .modeling_flava import * + from .processing_flava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/flava/configuration_flava.py b/src/transformers/models/flava/configuration_flava.py index b6349361c0dd..648fb5039717 100644 --- a/src/transformers/models/flava/configuration_flava.py +++ b/src/transformers/models/flava/configuration_flava.py @@ -759,3 +759,6 @@ def from_configs( image_codebook_config=image_codebook_config.to_dict(), **kwargs, ) + + +__all__ = ["FlavaImageConfig", "FlavaTextConfig", "FlavaMultimodalConfig", "FlavaImageCodebookConfig", "FlavaConfig"] diff --git a/src/transformers/models/flava/feature_extraction_flava.py b/src/transformers/models/flava/feature_extraction_flava.py index c707b575cef2..157472e9287a 100644 --- a/src/transformers/models/flava/feature_extraction_flava.py +++ b/src/transformers/models/flava/feature_extraction_flava.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_flava import FlavaImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class FlavaFeatureExtractor(FlavaImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: 
FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["FlavaFeatureExtractor"] diff --git a/src/transformers/models/flava/image_processing_flava.py b/src/transformers/models/flava/image_processing_flava.py index 72ef141df83d..d29ee4940e20 100644 --- a/src/transformers/models/flava/image_processing_flava.py +++ b/src/transformers/models/flava/image_processing_flava.py @@ -37,6 +37,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -133,6 +134,7 @@ def __call__(self): return mask +@export(backends=("vision",)) class FlavaImageProcessor(BaseImageProcessor): r""" Constructs a Flava image processor. @@ -698,3 +700,6 @@ def preprocess( data["bool_masked_pos"] = masks return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["FlavaImageProcessor"] diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index 589385dffecf..62d10c8dc3e5 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -2102,3 +2102,14 @@ def forward( mmm_image_logits=mmm_image_logits, mmm_text_logits=mmm_text_logits, ) + + +__all__ = [ + "FlavaPreTrainedModel", + "FlavaImageModel", + "FlavaTextModel", + "FlavaMultimodalModel", + "FlavaModel", + "FlavaImageCodebook", + "FlavaForPreTraining", +] diff --git a/src/transformers/models/flava/processing_flava.py b/src/transformers/models/flava/processing_flava.py index 7f439b040a8f..6c178b6dada4 100644 --- a/src/transformers/models/flava/processing_flava.py +++ b/src/transformers/models/flava/processing_flava.py @@ -23,8 +23,10 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType +from ...utils.import_utils import export +@export(backends=("vision",)) class FlavaProcessor(ProcessorMixin): r""" Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor. @@ -163,3 +165,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["FlavaProcessor"] diff --git a/src/transformers/models/fnet/__init__.py b/src/transformers/models/fnet/__init__.py index 08b6ddf864e1..c7c6bd54679a 100644 --- a/src/transformers/models/fnet/__init__.py +++ b/src/transformers/models/fnet/__init__.py @@ -13,93 +13,17 @@ # limitations under the License. 
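# `@export(backends=("vision",))`, applied to FlavaFeatureExtractor, FlavaImageProcessor
# and FlavaProcessor above, replaces the per-__init__ `is_vision_available()` guards.
# A minimal sketch of one plausible implementation, assuming the decorator only needs
# to record the backend requirement for the import scanner to pick up; the real one
# lives in src/transformers/utils/import_utils.py and may do more:
def export(*, backends=()):
    """Tag an object with the backends it requires so imports can be gated on them."""

    def decorator(obj):
        obj.backends = list(backends)  # read back when the defining file is scanned
        return obj

    return decorator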
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_fnet": ["FNetConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_fnet"] = ["FNetTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fnet"] = [ - "FNetForMaskedLM", - "FNetForMultipleChoice", - "FNetForNextSentencePrediction", - "FNetForPreTraining", - "FNetForQuestionAnswering", - "FNetForSequenceClassification", - "FNetForTokenClassification", - "FNetLayer", - "FNetModel", - "FNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_fnet import FNetConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_fnet import FNetTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_fnet_fast import FNetTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fnet import ( - FNetForMaskedLM, - FNetForMultipleChoice, - FNetForNextSentencePrediction, - FNetForPreTraining, - FNetForQuestionAnswering, - FNetForSequenceClassification, - FNetForTokenClassification, - FNetLayer, - FNetModel, - FNetPreTrainedModel, - ) - - + from .configuration_fnet import * + from .modeling_fnet import * + from .tokenization_fnet import * + from .tokenization_fnet_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fnet/configuration_fnet.py b/src/transformers/models/fnet/configuration_fnet.py index 90b77fc5d77a..24a578328565 100644 --- a/src/transformers/models/fnet/configuration_fnet.py +++ b/src/transformers/models/fnet/configuration_fnet.py @@ -114,3 +114,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations self.tpu_short_seq_length = tpu_short_seq_length + + +__all__ = ["FNetConfig"] diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index b1842dbc89d8..15a2e05076f8 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -1183,3 +1183,16 @@ def forward( return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states ) + + +__all__ = [ + "FNetPreTrainedModel", + "FNetModel", + "FNetForPreTraining", + "FNetForMaskedLM", + "FNetForNextSentencePrediction", + 
"FNetForSequenceClassification", + "FNetForMultipleChoice", + "FNetForTokenClassification", + "FNetForQuestionAnswering", +] diff --git a/src/transformers/models/fnet/tokenization_fnet.py b/src/transformers/models/fnet/tokenization_fnet.py index 29095c80ff02..7008ac694d85 100644 --- a/src/transformers/models/fnet/tokenization_fnet.py +++ b/src/transformers/models/fnet/tokenization_fnet.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -32,6 +33,7 @@ SPIECE_UNDERLINE = "▁" +@export(backends=("sentencepiece",)) class FNetTokenizer(PreTrainedTokenizer): """ Construct an FNet tokenizer. Adapted from [`AlbertTokenizer`]. Based on @@ -336,3 +338,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["FNetTokenizer"] diff --git a/src/transformers/models/fnet/tokenization_fnet_fast.py b/src/transformers/models/fnet/tokenization_fnet_fast.py index 3136b9f27c22..ac33bc13c60c 100644 --- a/src/transformers/models/fnet/tokenization_fnet_fast.py +++ b/src/transformers/models/fnet/tokenization_fnet_fast.py @@ -184,3 +184,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["FNetTokenizerFast"] diff --git a/src/transformers/models/focalnet/__init__.py b/src/transformers/models/focalnet/__init__.py index ceacb8a52a17..0dc083be18c5 100644 --- a/src/transformers/models/focalnet/__init__.py +++ b/src/transformers/models/focalnet/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_focalnet": ["FocalNetConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_focalnet"] = [ - "FocalNetForImageClassification", - "FocalNetForMaskedImageModeling", - "FocalNetBackbone", - "FocalNetModel", - "FocalNetPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_focalnet import FocalNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_focalnet import ( - FocalNetBackbone, - FocalNetForImageClassification, - FocalNetForMaskedImageModeling, - FocalNetModel, - FocalNetPreTrainedModel, - ) - + from .configuration_focalnet import * + from .modeling_focalnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/focalnet/configuration_focalnet.py b/src/transformers/models/focalnet/configuration_focalnet.py index 577530e2ecca..8fe5831a1122 100644 --- a/src/transformers/models/focalnet/configuration_focalnet.py +++ b/src/transformers/models/focalnet/configuration_focalnet.py @@ -159,3 +159,6 @@ def __init__( self._out_features, self._out_indices = 
get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["FocalNetConfig"] diff --git a/src/transformers/models/focalnet/modeling_focalnet.py b/src/transformers/models/focalnet/modeling_focalnet.py index 99f2dc658fcb..841821f23511 100644 --- a/src/transformers/models/focalnet/modeling_focalnet.py +++ b/src/transformers/models/focalnet/modeling_focalnet.py @@ -1027,3 +1027,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "FocalNetPreTrainedModel", + "FocalNetModel", + "FocalNetForMaskedImageModeling", + "FocalNetForImageClassification", + "FocalNetBackbone", +] diff --git a/src/transformers/models/fsmt/__init__.py b/src/transformers/models/fsmt/__init__.py index db960e4a5ce9..2b8b02a33692 100644 --- a/src/transformers/models/fsmt/__init__.py +++ b/src/transformers/models/fsmt/__init__.py @@ -11,39 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_fsmt": ["FSMTConfig"], - "tokenization_fsmt": ["FSMTTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_fsmt import FSMTConfig - from .tokenization_fsmt import FSMTTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel - + from .configuration_fsmt import * + from .modeling_fsmt import * + from .tokenization_fsmt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index 72af4ddab239..96b617e0da51 100644 --- a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -213,3 +213,6 @@ def __init__( early_stopping=early_stopping, **common_kwargs, ) + + +__all__ = ["FSMTConfig"] diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 179408aba38e..aef82cb00414 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1387,3 +1387,6 @@ def forward( self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions) + + +__all__ = ["PretrainedFSMTModel", "FSMTModel", "FSMTForConditionalGeneration"] diff --git a/src/transformers/models/fsmt/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py index d1f1ee4cac2b..ce28766100e3 100644 --- 
a/src/transformers/models/fsmt/tokenization_fsmt.py +++ b/src/transformers/models/fsmt/tokenization_fsmt.py @@ -516,3 +516,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["FSMTTokenizer"] diff --git a/src/transformers/models/funnel/__init__.py b/src/transformers/models/funnel/__init__.py index aa620540dc3f..b4a3d22ef8c9 100644 --- a/src/transformers/models/funnel/__init__.py +++ b/src/transformers/models/funnel/__init__.py @@ -11,120 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_funnel": ["FunnelConfig"], - "convert_funnel_original_tf_checkpoint_to_pytorch": [], - "tokenization_funnel": ["FunnelTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_funnel"] = [ - "FunnelBaseModel", - "FunnelForMaskedLM", - "FunnelForMultipleChoice", - "FunnelForPreTraining", - "FunnelForQuestionAnswering", - "FunnelForSequenceClassification", - "FunnelForTokenClassification", - "FunnelModel", - "FunnelPreTrainedModel", - "load_tf_weights_in_funnel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_funnel"] = [ - "TFFunnelBaseModel", - "TFFunnelForMaskedLM", - "TFFunnelForMultipleChoice", - "TFFunnelForPreTraining", - "TFFunnelForQuestionAnswering", - "TFFunnelForSequenceClassification", - "TFFunnelForTokenClassification", - "TFFunnelModel", - "TFFunnelPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_funnel import FunnelConfig - from .tokenization_funnel import FunnelTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_funnel_fast import FunnelTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_funnel import ( - FunnelBaseModel, - FunnelForMaskedLM, - FunnelForMultipleChoice, - FunnelForPreTraining, - FunnelForQuestionAnswering, - FunnelForSequenceClassification, - FunnelForTokenClassification, - FunnelModel, - FunnelPreTrainedModel, - load_tf_weights_in_funnel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_funnel import ( - TFFunnelBaseModel, - TFFunnelForMaskedLM, - TFFunnelForMultipleChoice, - TFFunnelForPreTraining, - TFFunnelForQuestionAnswering, - TFFunnelForSequenceClassification, - TFFunnelForTokenClassification, - TFFunnelModel, - TFFunnelPreTrainedModel, - ) - + from .configuration_funnel import * + from .modeling_funnel import * + from .modeling_tf_funnel import * + from .tokenization_funnel import * + from 
.tokenization_funnel_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/funnel/configuration_funnel.py b/src/transformers/models/funnel/configuration_funnel.py index 53d072d4c82e..b164f286042a 100644 --- a/src/transformers/models/funnel/configuration_funnel.py +++ b/src/transformers/models/funnel/configuration_funnel.py @@ -161,3 +161,6 @@ def num_blocks(self): @num_blocks.setter def num_blocks(self, value): raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.") + + +__all__ = ["FunnelConfig"] diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index b4fdfd5fc567..b69cfbabe77c 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -1592,3 +1592,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_funnel", + "FunnelPreTrainedModel", + "FunnelBaseModel", + "FunnelModel", + "FunnelForPreTraining", + "FunnelForMaskedLM", + "FunnelForSequenceClassification", + "FunnelForMultipleChoice", + "FunnelForTokenClassification", + "FunnelForQuestionAnswering", +] diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index ab5f14a4c66d..8fa328c4cbb3 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -1865,3 +1865,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFFunnelPreTrainedModel", + "TFFunnelBaseModel", + "TFFunnelModel", + "TFFunnelForPreTraining", + "TFFunnelForMaskedLM", + "TFFunnelForSequenceClassification", + "TFFunnelForMultipleChoice", + "TFFunnelForTokenClassification", + "TFFunnelForQuestionAnswering", +] diff --git a/src/transformers/models/funnel/tokenization_funnel.py b/src/transformers/models/funnel/tokenization_funnel.py index 68e7d958b748..86399ec89f56 100644 --- a/src/transformers/models/funnel/tokenization_funnel.py +++ b/src/transformers/models/funnel/tokenization_funnel.py @@ -532,3 +532,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["FunnelTokenizer"] diff --git a/src/transformers/models/funnel/tokenization_funnel_fast.py b/src/transformers/models/funnel/tokenization_funnel_fast.py index 6a48f2f54a87..c3e45ed62ac2 100644 --- a/src/transformers/models/funnel/tokenization_funnel_fast.py +++ b/src/transformers/models/funnel/tokenization_funnel_fast.py @@ -198,3 +198,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["FunnelTokenizerFast"] diff --git a/src/transformers/models/fuyu/__init__.py b/src/transformers/models/fuyu/__init__.py index 403acb1964c1..4575b2adebfe 100644 --- a/src/transformers/models/fuyu/__init__.py +++ b/src/transformers/models/fuyu/__init__.py @@ -13,61 +13,17 @@ # 
limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_fuyu": ["FuyuConfig"], -} - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_fuyu"] = ["FuyuImageProcessor"] - _import_structure["processing_fuyu"] = ["FuyuProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_fuyu"] = [ - "FuyuForCausalLM", - "FuyuPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_fuyu import FuyuConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_fuyu import FuyuImageProcessor - from .processing_fuyu import FuyuProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_fuyu import ( - FuyuForCausalLM, - FuyuPreTrainedModel, - ) - - + from .configuration_fuyu import * + from .image_processing_fuyu import * + from .modeling_fuyu import * + from .processing_fuyu import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/fuyu/configuration_fuyu.py b/src/transformers/models/fuyu/configuration_fuyu.py index 03d2aecc02b6..d0fed58f30fc 100644 --- a/src/transformers/models/fuyu/configuration_fuyu.py +++ b/src/transformers/models/fuyu/configuration_fuyu.py @@ -224,3 +224,6 @@ def to_dict(self): output = super().to_dict() output.pop("_vocab_size", None) return output + + +__all__ = ["FuyuConfig"] diff --git a/src/transformers/models/fuyu/image_processing_fuyu.py b/src/transformers/models/fuyu/image_processing_fuyu.py index 255922b83088..6b60e0753653 100644 --- a/src/transformers/models/fuyu/image_processing_fuyu.py +++ b/src/transformers/models/fuyu/image_processing_fuyu.py @@ -46,6 +46,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_torch_available(): @@ -179,6 +180,7 @@ def _to(elem): return self +@export(backends=("vision",)) class FuyuImageProcessor(BaseImageProcessor): """ This class should handle the image processing part before the main FuyuForCausalLM. 
In particular, it should @@ -718,3 +720,6 @@ def preprocess_with_tokenizer_info( "image_patch_indices_per_subsequence": image_patch_indices_per_subsequence, } ) + + +__all__ = ["FuyuImageProcessor"] diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index b4b6330d0d86..296478ff5434 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -383,3 +383,6 @@ def prepare_inputs_for_generation( } ) return model_inputs + + +__all__ = ["FuyuPreTrainedModel", "FuyuForCausalLM"] diff --git a/src/transformers/models/fuyu/processing_fuyu.py b/src/transformers/models/fuyu/processing_fuyu.py index 6b542ba3378e..65701eeb1298 100644 --- a/src/transformers/models/fuyu/processing_fuyu.py +++ b/src/transformers/models/fuyu/processing_fuyu.py @@ -24,6 +24,7 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy from ...utils import TensorType, is_torch_available, logging, requires_backends +from ...utils.import_utils import export if is_torch_available(): @@ -307,6 +308,7 @@ def scale_bbox_to_transformed_image( return [top_scaled, left_scaled, bottom_scaled, right_scaled] +@export(backends=("vision",)) class FuyuProcessor(ProcessorMixin): r""" Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor. @@ -694,3 +696,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["FuyuProcessor"] diff --git a/src/transformers/models/gemma/__init__.py b/src/transformers/models/gemma/__init__.py index 1aafae6e88c2..2ae1c9efc360 100644 --- a/src/transformers/models/gemma/__init__.py +++ b/src/transformers/models/gemma/__init__.py @@ -13,111 +13,18 @@ # limitations under the License. 
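# With the try/except OptionalDependencyNotAvailable blocks gone from the __init__
# files, the backend gate moves to attribute-access time. A behavioral sketch,
# assuming the long-standing missing-backend error path is preserved:
from transformers.models.fuyu import FuyuImageProcessor  # lazy: nothing heavy imported yet

processor = FuyuImageProcessor()  # in an environment without the "vision" backend
                                  # (Pillow), this should raise an ImportError that
                                  # names the missing dependency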
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_gemma": ["GemmaConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gemma"] = ["GemmaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gemma_fast"] = ["GemmaTokenizerFast"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gemma"] = [ - "GemmaForCausalLM", - "GemmaModel", - "GemmaPreTrainedModel", - "GemmaForSequenceClassification", - "GemmaForTokenClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gemma"] = [ - "FlaxGemmaForCausalLM", - "FlaxGemmaModel", - "FlaxGemmaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gemma import GemmaConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gemma import GemmaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gemma_fast import GemmaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gemma import ( - GemmaForCausalLM, - GemmaForSequenceClassification, - GemmaForTokenClassification, - GemmaModel, - GemmaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gemma import ( - FlaxGemmaForCausalLM, - FlaxGemmaModel, - FlaxGemmaPreTrainedModel, - ) - - + from .configuration_gemma import * + from .modeling_flax_gemma import * + from .modeling_gemma import * + from .tokenization_gemma import * + from .tokenization_gemma_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gemma/configuration_gemma.py b/src/transformers/models/gemma/configuration_gemma.py index e8de9ddcee2e..d7f62c42abbd 100644 --- a/src/transformers/models/gemma/configuration_gemma.py +++ b/src/transformers/models/gemma/configuration_gemma.py @@ -143,3 +143,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["GemmaConfig"] diff --git a/src/transformers/models/gemma/modeling_flax_gemma.py b/src/transformers/models/gemma/modeling_flax_gemma.py index 16291f3c3abe..9e21f1e1cc40 100644 --- a/src/transformers/models/gemma/modeling_flax_gemma.py +++ b/src/transformers/models/gemma/modeling_flax_gemma.py @@ -772,3 +772,5 @@ def 
update_inputs_for_generation(self, model_outputs, model_kwargs): _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) + +__all__ = ["FlaxGemmaPreTrainedModel", "FlaxGemmaModel", "FlaxGemmaForCausalLM"] diff --git a/src/transformers/models/gemma/modeling_gemma.py b/src/transformers/models/gemma/modeling_gemma.py index 085751cd9bc0..15a43ce91ddd 100644 --- a/src/transformers/models/gemma/modeling_gemma.py +++ b/src/transformers/models/gemma/modeling_gemma.py @@ -764,6 +764,8 @@ def _init_weights(self, module): "The bare Gemma Model outputting raw hidden-states without any specific head on top.", GEMMA_START_DOCSTRING, ) + +# Copied from transformers.models.llama.modeling_llama.LlamaModel with LLAMA->GEMMA,Llama->Gemma class GemmaModel(GemmaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`GemmaDecoderLayer`] @@ -1402,3 +1404,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GemmaPreTrainedModel", + "GemmaModel", + "GemmaForCausalLM", + "GemmaForSequenceClassification", + "GemmaForTokenClassification", +] diff --git a/src/transformers/models/gemma/tokenization_gemma.py b/src/transformers/models/gemma/tokenization_gemma.py index 09e779478c0e..b5f050084575 100644 --- a/src/transformers/models/gemma/tokenization_gemma.py +++ b/src/transformers/models/gemma/tokenization_gemma.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export if TYPE_CHECKING: @@ -35,6 +36,7 @@ SPIECE_UNDERLINE = "▁" +@export(backends=("sentencepiece",)) class GemmaTokenizer(PreTrainedTokenizer): """ Construct a Gemma tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is @@ -325,3 +327,6 @@ def create_token_type_ids_from_sequences( output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output + + +__all__ = ["GemmaTokenizer"] diff --git a/src/transformers/models/gemma/tokenization_gemma_fast.py b/src/transformers/models/gemma/tokenization_gemma_fast.py index fd7a979e8b75..0e6f4a20b6d6 100644 --- a/src/transformers/models/gemma/tokenization_gemma_fast.py +++ b/src/transformers/models/gemma/tokenization_gemma_fast.py @@ -197,3 +197,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = output + bos_token_id + token_ids_1 + eos_token_id return output + + +__all__ = ["GemmaTokenizerFast"] diff --git a/src/transformers/models/git/__init__.py b/src/transformers/models/git/__init__.py index 02f5f6d88a11..9c548540e926 100644 --- a/src/transformers/models/git/__init__.py +++ b/src/transformers/models/git/__init__.py @@ -11,48 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
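# The TYPE_CHECKING branches can rely on star-imports because Python's `import *`
# re-exports exactly the names a module lists in `__all__`. A self-contained
# illustration with hypothetical file names, independent of transformers:
#
# helpers.py
__all__ = ["Public"]


class Public:
    pass


class _Internal:  # absent from __all__, so `from helpers import *` skips it
    pass


# consumer.py
# from helpers import *  # binds Public only, so static checkers see the same
#                        # surface that _LazyModule serves at runtime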
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_git": ["GitConfig", "GitVisionConfig"], - "processing_git": ["GitProcessor"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_git"] = [ - "GitForCausalLM", - "GitModel", - "GitPreTrainedModel", - "GitVisionModel", - ] if TYPE_CHECKING: - from .configuration_git import GitConfig, GitVisionConfig - from .processing_git import GitProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_git import ( - GitForCausalLM, - GitModel, - GitPreTrainedModel, - GitVisionModel, - ) - + from .configuration_git import * + from .modeling_git import * + from .processing_git import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/git/configuration_git.py b/src/transformers/models/git/configuration_git.py index ecaea17ff946..6f55a21a75c8 100644 --- a/src/transformers/models/git/configuration_git.py +++ b/src/transformers/models/git/configuration_git.py @@ -235,3 +235,6 @@ def __init__( self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id + + +__all__ = ["GitVisionConfig", "GitConfig"] diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 43333e3d3338..4f1ef5fa3fd3 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -1568,3 +1568,6 @@ def _reorder_cache(self, past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["GitPreTrainedModel", "GitVisionModel", "GitModel", "GitForCausalLM"] diff --git a/src/transformers/models/git/processing_git.py b/src/transformers/models/git/processing_git.py index 98649c644e72..c848ee026832 100644 --- a/src/transformers/models/git/processing_git.py +++ b/src/transformers/models/git/processing_git.py @@ -117,3 +117,6 @@ def decode(self, *args, **kwargs): @property def model_input_names(self): return ["input_ids", "attention_mask", "pixel_values"] + + +__all__ = ["GitProcessor"] diff --git a/src/transformers/models/glpn/__init__.py b/src/transformers/models/glpn/__init__.py index 9896e801c93a..2e30a91bf583 100644 --- a/src/transformers/models/glpn/__init__.py +++ b/src/transformers/models/glpn/__init__.py @@ -13,61 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_glpn": ["GLPNConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"] - _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_glpn"] = [ - "GLPNForDepthEstimation", - "GLPNLayer", - "GLPNModel", - "GLPNPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_glpn import GLPNConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_glpn import GLPNFeatureExtractor - from .image_processing_glpn import GLPNImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_glpn import ( - GLPNForDepthEstimation, - GLPNLayer, - GLPNModel, - GLPNPreTrainedModel, - ) - - + from .configuration_glpn import * + from .feature_extraction_glpn import * + from .image_processing_glpn import * + from .modeling_glpn import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/glpn/configuration_glpn.py b/src/transformers/models/glpn/configuration_glpn.py index 88e1d6e1f029..19eb04b19b86 100644 --- a/src/transformers/models/glpn/configuration_glpn.py +++ b/src/transformers/models/glpn/configuration_glpn.py @@ -130,3 +130,6 @@ def __init__( self.decoder_hidden_size = decoder_hidden_size self.max_depth = max_depth self.head_in_index = head_in_index + + +__all__ = ["GLPNConfig"] diff --git a/src/transformers/models/glpn/feature_extraction_glpn.py b/src/transformers/models/glpn/feature_extraction_glpn.py index 314268225d2a..e06bed952642 100644 --- a/src/transformers/models/glpn/feature_extraction_glpn.py +++ b/src/transformers/models/glpn/feature_extraction_glpn.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_glpn import GLPNImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class GLPNFeatureExtractor(GLPNImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["GLPNFeatureExtractor"] diff --git a/src/transformers/models/glpn/image_processing_glpn.py b/src/transformers/models/glpn/image_processing_glpn.py index 9e69c8ae8a6e..607923c05bbe 100644 --- a/src/transformers/models/glpn/image_processing_glpn.py +++ b/src/transformers/models/glpn/image_processing_glpn.py @@ -33,11 +33,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) 
+@export(backends=("vision",)) class GLPNImageProcessor(BaseImageProcessor): r""" Constructs a GLPN image processor. @@ -218,3 +220,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["GLPNImageProcessor"] diff --git a/src/transformers/models/glpn/modeling_glpn.py b/src/transformers/models/glpn/modeling_glpn.py index 9fd22ca0f7be..6a5a24af94b8 100755 --- a/src/transformers/models/glpn/modeling_glpn.py +++ b/src/transformers/models/glpn/modeling_glpn.py @@ -773,3 +773,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["GLPNPreTrainedModel", "GLPNModel", "GLPNForDepthEstimation"] diff --git a/src/transformers/models/gpt2/__init__.py b/src/transformers/models/gpt2/__init__.py index 8c77c68445a8..7099ebe64510 100644 --- a/src/transformers/models/gpt2/__init__.py +++ b/src/transformers/models/gpt2/__init__.py @@ -11,143 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_keras_nlp_available, - is_tensorflow_text_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_gpt2": ["GPT2Config", "GPT2OnnxConfig"], - "tokenization_gpt2": ["GPT2Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt2_fast"] = ["GPT2TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt2"] = [ - "GPT2DoubleHeadsModel", - "GPT2ForQuestionAnswering", - "GPT2ForSequenceClassification", - "GPT2ForTokenClassification", - "GPT2LMHeadModel", - "GPT2Model", - "GPT2PreTrainedModel", - "load_tf_weights_in_gpt2", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_gpt2"] = [ - "TFGPT2DoubleHeadsModel", - "TFGPT2ForSequenceClassification", - "TFGPT2LMHeadModel", - "TFGPT2MainLayer", - "TFGPT2Model", - "TFGPT2PreTrainedModel", - ] - -try: - if not is_keras_nlp_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt2_tf"] = ["TFGPT2Tokenizer"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"] if TYPE_CHECKING: - from .configuration_gpt2 import GPT2Config, GPT2OnnxConfig - from .tokenization_gpt2 import GPT2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt2_fast import GPT2TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt2 import ( - GPT2DoubleHeadsModel, 
- GPT2ForQuestionAnswering, - GPT2ForSequenceClassification, - GPT2ForTokenClassification, - GPT2LMHeadModel, - GPT2Model, - GPT2PreTrainedModel, - load_tf_weights_in_gpt2, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_gpt2 import ( - TFGPT2DoubleHeadsModel, - TFGPT2ForSequenceClassification, - TFGPT2LMHeadModel, - TFGPT2MainLayer, - TFGPT2Model, - TFGPT2PreTrainedModel, - ) - - try: - if not is_keras_nlp_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt2_tf import TFGPT2Tokenizer - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel - + from .configuration_gpt2 import * + from .modeling_flax_gpt2 import * + from .modeling_gpt2 import * + from .modeling_tf_gpt2 import * + from .tokenization_gpt2 import * + from .tokenization_gpt2_fast import * + from .tokenization_gpt2_tf import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt2/configuration_gpt2.py b/src/transformers/models/gpt2/configuration_gpt2.py index 82a24912958f..cd9d8ccaa286 100644 --- a/src/transformers/models/gpt2/configuration_gpt2.py +++ b/src/transformers/models/gpt2/configuration_gpt2.py @@ -18,10 +18,11 @@ from collections import OrderedDict from typing import Any, List, Mapping, Optional -from ... 
import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec +from ...tokenization_utils import PreTrainedTokenizer, TensorType from ...utils import logging +from ...utils.import_utils import is_torch_available logger = logging.get_logger(__name__) @@ -268,3 +269,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["GPT2Config", "GPT2OnnxConfig"] diff --git a/src/transformers/models/gpt2/modeling_flax_gpt2.py b/src/transformers/models/gpt2/modeling_flax_gpt2.py index c3ef377642a3..06482b12f8c9 100644 --- a/src/transformers/models/gpt2/modeling_flax_gpt2.py +++ b/src/transformers/models/gpt2/modeling_flax_gpt2.py @@ -777,3 +777,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = ["FlaxGPT2PreTrainedModel", "FlaxGPT2Model", "FlaxGPT2LMHeadModel"] diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 8dfbfb906444..6807adbde5a3 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -1960,3 +1960,15 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_gpt2", + "GPT2PreTrainedModel", + "GPT2Model", + "GPT2LMHeadModel", + "GPT2DoubleHeadsModel", + "GPT2ForSequenceClassification", + "GPT2ForTokenClassification", + "GPT2ForQuestionAnswering", +] diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index acdd65006f3e..0033fc853463 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -1233,3 +1233,13 @@ def build(self, input_shape=None): if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) + + +__all__ = [ + "TFGPT2PreTrainedModel", + "TFGPT2Model", + "TFGPT2LMHeadModel", + "TFGPT2DoubleHeadsModel", + "TFGPT2ForSequenceClassification", + "TFGPT2MainLayer", +] diff --git a/src/transformers/models/gpt2/tokenization_gpt2.py b/src/transformers/models/gpt2/tokenization_gpt2.py index badacf6dbe71..709bcec5b611 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2.py +++ b/src/transformers/models/gpt2/tokenization_gpt2.py @@ -329,3 +329,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) + + +__all__ = ["GPT2Tokenizer"] diff --git a/src/transformers/models/gpt2/tokenization_gpt2_fast.py b/src/transformers/models/gpt2/tokenization_gpt2_fast.py index 90e83f0d35a3..d3f95657c0c9 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2_fast.py +++ b/src/transformers/models/gpt2/tokenization_gpt2_fast.py @@ -139,3 +139,6 @@ def _encode_plus(self, *args, **kwargs) -> BatchEncoding: def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["GPT2TokenizerFast"] diff --git a/src/transformers/models/gpt2/tokenization_gpt2_tf.py b/src/transformers/models/gpt2/tokenization_gpt2_tf.py index d763eb848550..0c0fdb3ae806 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2_tf.py +++ 
b/src/transformers/models/gpt2/tokenization_gpt2_tf.py @@ -102,3 +102,6 @@ def call(self, x, max_length: int = None): ) return {"attention_mask": attention_mask, "input_ids": input_ids} + + +__all__ = ["TFGPT2Tokenizer"] diff --git a/src/transformers/models/gpt_bigcode/__init__.py b/src/transformers/models/gpt_bigcode/__init__.py index 60eec86ca541..d3cee17ed802 100644 --- a/src/transformers/models/gpt_bigcode/__init__.py +++ b/src/transformers/models/gpt_bigcode/__init__.py @@ -11,53 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_gpt_bigcode": ["GPTBigCodeConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_bigcode"] = [ - "GPTBigCodeForSequenceClassification", - "GPTBigCodeForTokenClassification", - "GPTBigCodeForCausalLM", - "GPTBigCodeModel", - "GPTBigCodePreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_gpt_bigcode import GPTBigCodeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_bigcode import ( - GPTBigCodeForCausalLM, - GPTBigCodeForSequenceClassification, - GPTBigCodeForTokenClassification, - GPTBigCodeModel, - GPTBigCodePreTrainedModel, - ) - - + from .configuration_gpt_bigcode import * + from .modeling_gpt_bigcode import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py index 5bd72d23f986..46a3dfea4410 100644 --- a/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py @@ -139,3 +139,6 @@ def __init__( self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["GPTBigCodeConfig"] diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index 0f927a72469d..312f30c8fbd0 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -1417,3 +1417,12 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "GPTBigCodePreTrainedModel", + "GPTBigCodeModel", + "GPTBigCodeForCausalLM", + "GPTBigCodeForSequenceClassification", + "GPTBigCodeForTokenClassification", +] diff --git a/src/transformers/models/gpt_neo/__init__.py b/src/transformers/models/gpt_neo/__init__.py index 6c314c89f713..57a1b73b5494 100644 --- a/src/transformers/models/gpt_neo/__init__.py +++ b/src/transformers/models/gpt_neo/__init__.py @@ -13,71 +13,16 @@ # limitations under the License. 
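# The mechanism behind `sys.modules[__name__] = _LazyModule(...)` can be pictured as
# a PEP 562 module-level __getattr__ (illustration of the principle only; the real
# _LazyModule subclasses types.ModuleType and also applies the backend checks):
import importlib

_structure = {"configuration_gpt_bigcode": ["GPTBigCodeConfig"]}
_owner = {name: module for module, names in _structure.items() for name in names}


def __getattr__(name):
    if name in _owner:
        submodule = importlib.import_module("." + _owner[name], __package__)
        return getattr(submodule, name)  # imported on first access, then cached
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")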
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - - -_import_structure = { - "configuration_gpt_neo": ["GPTNeoConfig", "GPTNeoOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neo"] = [ - "GPTNeoForCausalLM", - "GPTNeoForQuestionAnswering", - "GPTNeoForSequenceClassification", - "GPTNeoForTokenClassification", - "GPTNeoModel", - "GPTNeoPreTrainedModel", - "load_tf_weights_in_gpt_neo", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gpt_neo"] = [ - "FlaxGPTNeoForCausalLM", - "FlaxGPTNeoModel", - "FlaxGPTNeoPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neo import GPTNeoConfig, GPTNeoOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neo import ( - GPTNeoForCausalLM, - GPTNeoForQuestionAnswering, - GPTNeoForSequenceClassification, - GPTNeoForTokenClassification, - GPTNeoModel, - GPTNeoPreTrainedModel, - load_tf_weights_in_gpt_neo, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel - - + from .configuration_gpt_neo import * + from .modeling_flax_gpt_neo import * + from .modeling_gpt_neo import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neo/configuration_gpt_neo.py b/src/transformers/models/gpt_neo/configuration_gpt_neo.py index a3c261e855b9..a5071bf8fa2b 100644 --- a/src/transformers/models/gpt_neo/configuration_gpt_neo.py +++ b/src/transformers/models/gpt_neo/configuration_gpt_neo.py @@ -17,10 +17,11 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... 
import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast +from ...tokenization_utils import PreTrainedTokenizer, TensorType from ...utils import logging +from ...utils.import_utils import is_torch_available logger = logging.get_logger(__name__) @@ -267,3 +268,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["GPTNeoConfig", "GPTNeoOnnxConfig"] diff --git a/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py index 5639ca50f166..4a55e2c57720 100644 --- a/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py @@ -682,3 +682,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) + +__all__ = ["FlaxGPTNeoPreTrainedModel", "FlaxGPTNeoModel", "FlaxGPTNeoForCausalLM"] diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 72590862b749..d7b1430fb116 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -1409,3 +1409,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_gpt_neo", + "GPTNeoPreTrainedModel", + "GPTNeoModel", + "GPTNeoForCausalLM", + "GPTNeoForSequenceClassification", + "GPTNeoForTokenClassification", + "GPTNeoForQuestionAnswering", +] diff --git a/src/transformers/models/gpt_neox/__init__.py b/src/transformers/models/gpt_neox/__init__.py index 05a6982acb0b..2e5ad2b1b536 100644 --- a/src/transformers/models/gpt_neox/__init__.py +++ b/src/transformers/models/gpt_neox/__init__.py @@ -13,66 +13,16 @@ # limitations under the License. 
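# Note the import relocation in the ONNX configs above: `is_torch_available` now comes
# from its defining module rather than the package root, presumably so intra-library
# code stops importing through the (now lazy) top-level __init__. Usage is unchanged:
from transformers.utils.import_utils import is_torch_available

if is_torch_available():
    import torch  # only needed when dummy inputs are materialized as tensors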
from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_gpt_neox": ["GPTNeoXConfig"]} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neox"] = [ - "GPTNeoXForCausalLM", - "GPTNeoXForQuestionAnswering", - "GPTNeoXForSequenceClassification", - "GPTNeoXForTokenClassification", - "GPTNeoXLayer", - "GPTNeoXModel", - "GPTNeoXPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neox import GPTNeoXConfig - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neox import ( - GPTNeoXForCausalLM, - GPTNeoXForQuestionAnswering, - GPTNeoXForSequenceClassification, - GPTNeoXForTokenClassification, - GPTNeoXLayer, - GPTNeoXModel, - GPTNeoXPreTrainedModel, - ) - - + from .configuration_gpt_neox import * + from .modeling_gpt_neox import * + from .tokenization_gpt_neox_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neox/configuration_gpt_neox.py b/src/transformers/models/gpt_neox/configuration_gpt_neox.py index 944dbb5e02f0..7cf8aa59ab59 100644 --- a/src/transformers/models/gpt_neox/configuration_gpt_neox.py +++ b/src/transformers/models/gpt_neox/configuration_gpt_neox.py @@ -173,3 +173,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["GPTNeoXConfig"] diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index 259f01fd3cb1..403e7f27f4c8 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -1565,3 +1565,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GPTNeoXPreTrainedModel", + "GPTNeoXModel", + "GPTNeoXForCausalLM", + "GPTNeoXForSequenceClassification", + "GPTNeoXForTokenClassification", + "GPTNeoXForQuestionAnswering", +] diff --git a/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py b/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py index c79e6d9ada15..5bfa06d9efae 100644 --- a/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py +++ b/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py @@ -228,3 +228,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): def 
save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["GPTNeoXTokenizerFast"] diff --git a/src/transformers/models/gpt_neox_japanese/__init__.py b/src/transformers/models/gpt_neox_japanese/__init__.py index c43391c04958..d1960c043dee 100644 --- a/src/transformers/models/gpt_neox_japanese/__init__.py +++ b/src/transformers/models/gpt_neox_japanese/__init__.py @@ -13,48 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...file_utils import _LazyModule, is_torch_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = { - "configuration_gpt_neox_japanese": ["GPTNeoXJapaneseConfig"], - "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neox_japanese"] = [ - "GPTNeoXJapaneseForCausalLM", - "GPTNeoXJapaneseLayer", - "GPTNeoXJapaneseModel", - "GPTNeoXJapanesePreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig - from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neox_japanese import ( - GPTNeoXJapaneseForCausalLM, - GPTNeoXJapaneseLayer, - GPTNeoXJapaneseModel, - GPTNeoXJapanesePreTrainedModel, - ) - - + from .configuration_gpt_neox_japanese import * + from .modeling_gpt_neox_japanese import * + from .tokenization_gpt_neox_japanese import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py index d3c18a364327..6af25195777b 100644 --- a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py @@ -115,3 +115,6 @@ def __init__( self.use_cache = use_cache self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout + + +__all__ = ["GPTNeoXJapaneseConfig"] diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py index b9c4cad0fdc5..c21f947c9489 100755 --- a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -724,3 +724,6 @@ def _reorder_cache(self, past_key_values, beam_idx): + layer_past[2:], ) return reordered_past + + +__all__ = ["GPTNeoXJapanesePreTrainedModel", "GPTNeoXJapaneseModel", "GPTNeoXJapaneseForCausalLM"] diff --git a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py index 285dcb7d18e2..dbb084e930bd 100644 --- 
a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py @@ -349,3 +349,6 @@ def convert_id_to_token(self, index, breakline="\n"): words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) text = "".join(words) return text + + +__all__ = ["GPTNeoXJapaneseTokenizer"] diff --git a/src/transformers/models/gpt_sw3/__init__.py b/src/transformers/models/gpt_sw3/__init__.py index e7c08f0e27e7..a596dff3d225 100644 --- a/src/transformers/models/gpt_sw3/__init__.py +++ b/src/transformers/models/gpt_sw3/__init__.py @@ -11,33 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_gpt_sw3 import GPTSw3Tokenizer - + from .tokenization_gpt_sw3 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py b/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py index 262aeaba5eea..d520144b438a 100644 --- a/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py +++ b/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py @@ -10,6 +10,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging +from ...utils.import_utils import export if is_torch_available(): @@ -20,6 +21,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} +@export(backends=("sentencepiece",)) class GPTSw3Tokenizer(PreTrainedTokenizer): """ Construct an GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -294,3 +296,6 @@ def decode_fast(self, token_ids: Union[int, List[int]]) -> str: """ return self.sp_model.decode(token_ids) + + +__all__ = ["GPTSw3Tokenizer"] diff --git a/src/transformers/models/gptj/__init__.py b/src/transformers/models/gptj/__init__.py index 51520484529f..d899fbb32567 100644 --- a/src/transformers/models/gptj/__init__.py +++ b/src/transformers/models/gptj/__init__.py @@ -13,98 +13,17 @@ # limitations under the License. 
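Note on the `@export(backends=("sentencepiece",))` marker added above on `GPTSw3Tokenizer`: it declares the required backend right next to the object's definition, replacing the per-`__init__` dependency bookkeeping being deleted throughout this patch. A minimal sketch of the idea, assuming only that the decorator needs to record metadata for a later file scan (the actual implementation lives in `utils/import_utils.py` and is not shown in this excerpt):

def export(*, backends=()):
    """Record which optional backends an object needs so the import
    machinery can group it under the right availability check."""

    def inner(obj):
        obj.__backends = backends  # read back when the file is scanned
        return obj

    return inner
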
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = {"configuration_gptj": ["GPTJConfig", "GPTJOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gptj"] = [ - "GPTJForCausalLM", - "GPTJForQuestionAnswering", - "GPTJForSequenceClassification", - "GPTJModel", - "GPTJPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_gptj"] = [ - "TFGPTJForCausalLM", - "TFGPTJForQuestionAnswering", - "TFGPTJForSequenceClassification", - "TFGPTJModel", - "TFGPTJPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gptj"] = [ - "FlaxGPTJForCausalLM", - "FlaxGPTJModel", - "FlaxGPTJPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_gptj import GPTJConfig, GPTJOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gptj import ( - GPTJForCausalLM, - GPTJForQuestionAnswering, - GPTJForSequenceClassification, - GPTJModel, - GPTJPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_gptj import ( - TFGPTJForCausalLM, - TFGPTJForQuestionAnswering, - TFGPTJForSequenceClassification, - TFGPTJModel, - TFGPTJPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel - + from .configuration_gptj import * + from .modeling_flax_gptj import * + from .modeling_gptj import * + from .modeling_tf_gptj import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/gptj/configuration_gptj.py b/src/transformers/models/gptj/configuration_gptj.py index 1b93f259b05b..4c4764ea8703 100644 --- a/src/transformers/models/gptj/configuration_gptj.py +++ b/src/transformers/models/gptj/configuration_gptj.py @@ -17,10 +17,11 @@ from collections import OrderedDict from typing import Any, List, Mapping, Optional -from ... 
import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec +from ...tokenization_utils import PreTrainedTokenizer, TensorType from ...utils import logging +from ...utils.import_utils import is_torch_available logger = logging.get_logger(__name__) @@ -214,3 +215,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["GPTJConfig", "GPTJOnnxConfig"] diff --git a/src/transformers/models/gptj/modeling_flax_gptj.py b/src/transformers/models/gptj/modeling_flax_gptj.py index 9f0d4d6e8600..ad2dfd3b3424 100644 --- a/src/transformers/models/gptj/modeling_flax_gptj.py +++ b/src/transformers/models/gptj/modeling_flax_gptj.py @@ -716,3 +716,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutput, _CONFIG_FOR_DOC, ) + +__all__ = ["FlaxGPTJPreTrainedModel", "FlaxGPTJModel", "FlaxGPTJForCausalLM"] diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index bd7ce5696fa0..bf040f45e064 100644 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -1468,3 +1468,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "GPTJPreTrainedModel", + "GPTJModel", + "GPTJForCausalLM", + "GPTJForSequenceClassification", + "GPTJForQuestionAnswering", +] diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py index a931287adfcd..d23ab5fbe0d7 100644 --- a/src/transformers/models/gptj/modeling_tf_gptj.py +++ b/src/transformers/models/gptj/modeling_tf_gptj.py @@ -1096,3 +1096,12 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFGPTJPreTrainedModel", + "TFGPTJModel", + "TFGPTJForCausalLM", + "TFGPTJForSequenceClassification", + "TFGPTJForQuestionAnswering", +] diff --git a/src/transformers/models/grounding_dino/__init__.py b/src/transformers/models/grounding_dino/__init__.py index 7cd3e115e15d..15169ed7f8de 100644 --- a/src/transformers/models/grounding_dino/__init__.py +++ b/src/transformers/models/grounding_dino/__init__.py @@ -11,65 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_grounding_dino": ["GroundingDinoConfig"], - "processing_grounding_dino": ["GroundingDinoProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_grounding_dino"] = [ - "GroundingDinoForObjectDetection", - "GroundingDinoModel", - "GroundingDinoPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_grounding_dino"] = ["GroundingDinoImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_grounding_dino import ( - GroundingDinoConfig, - ) - from .processing_grounding_dino import GroundingDinoProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_grounding_dino import ( - GroundingDinoForObjectDetection, - GroundingDinoModel, - GroundingDinoPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_grounding_dino import GroundingDinoImageProcessor - + from .configuration_grounding_dino import * + from .image_processing_grounding_dino import * + from .modeling_grounding_dino import * + from .processing_grounding_dino import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/grounding_dino/configuration_grounding_dino.py b/src/transformers/models/grounding_dino/configuration_grounding_dino.py index 362e50a1c1cc..ca8960ee9a96 100644 --- a/src/transformers/models/grounding_dino/configuration_grounding_dino.py +++ b/src/transformers/models/grounding_dino/configuration_grounding_dino.py @@ -293,3 +293,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["GroundingDinoConfig"] diff --git a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py index 569e22ba4700..d56080c567b7 100644 --- a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py +++ b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py @@ -63,6 +63,7 @@ is_vision_available, logging, ) +from ...utils.import_utils import export if is_torch_available(): @@ -806,6 +807,7 @@ def compute_segments( return segmentation, segments +@export(backends=("vision",)) class GroundingDinoImageProcessor(BaseImageProcessor): r""" Constructs a Grounding DINO image processor. 
@@ -1586,3 +1588,6 @@ def post_process_object_detection( results.append({"scores": score, "labels": label, "boxes": box}) return results + + +__all__ = ["GroundingDinoImageProcessor"] diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py index 3b298704de32..ca31daa7c263 100644 --- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py +++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py @@ -3143,3 +3143,6 @@ def forward( ) return dict_outputs + + +__all__ = ["GroundingDinoPreTrainedModel", "GroundingDinoModel", "GroundingDinoForObjectDetection"] diff --git a/src/transformers/models/grounding_dino/processing_grounding_dino.py b/src/transformers/models/grounding_dino/processing_grounding_dino.py index 00c183338be0..1507683d8b6d 100644 --- a/src/transformers/models/grounding_dino/processing_grounding_dino.py +++ b/src/transformers/models/grounding_dino/processing_grounding_dino.py @@ -243,3 +243,6 @@ def post_process_grounded_object_detection( results.append({"scores": score, "labels": label, "boxes": box}) return results + + +__all__ = ["GroundingDinoProcessor"] diff --git a/src/transformers/models/groupvit/__init__.py b/src/transformers/models/groupvit/__init__.py index 98fc6f4eccef..e0d45444542f 100644 --- a/src/transformers/models/groupvit/__init__.py +++ b/src/transformers/models/groupvit/__init__.py @@ -13,79 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_groupvit": [ - "GroupViTConfig", - "GroupViTOnnxConfig", - "GroupViTTextConfig", - "GroupViTVisionConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_groupvit"] = [ - "GroupViTModel", - "GroupViTPreTrainedModel", - "GroupViTTextModel", - "GroupViTVisionModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_groupvit"] = [ - "TFGroupViTModel", - "TFGroupViTPreTrainedModel", - "TFGroupViTTextModel", - "TFGroupViTVisionModel", - ] - if TYPE_CHECKING: - from .configuration_groupvit import ( - GroupViTConfig, - GroupViTOnnxConfig, - GroupViTTextConfig, - GroupViTVisionConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_groupvit import ( - GroupViTModel, - GroupViTPreTrainedModel, - GroupViTTextModel, - GroupViTVisionModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_groupvit import ( - TFGroupViTModel, - TFGroupViTPreTrainedModel, - TFGroupViTTextModel, - TFGroupViTVisionModel, - ) - + from .configuration_groupvit import * + from .modeling_groupvit import * + from .modeling_tf_groupvit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py index e608fbcdbe9c..ccf2cffb3221 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -447,3 +447,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 14 + + +__all__ = ["GroupViTTextConfig", "GroupViTVisionConfig", "GroupViTConfig", "GroupViTOnnxConfig"] diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 3a2ccab8429e..80c8438bf9c8 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -1584,3 +1584,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", "GroupViTModel"] diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py index b5838a5264f6..8b17c36a699b 100644 --- a/src/transformers/models/groupvit/modeling_tf_groupvit.py +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -2136,3 +2136,6 @@ def build(self, input_shape=None): if getattr(self, "groupvit", None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) + + +__all__ = ["TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", "TFGroupViTModel"] diff --git a/src/transformers/models/herbert/__init__.py b/src/transformers/models/herbert/__init__.py index 54037995229f..7077017c94cd 100644 --- a/src/transformers/models/herbert/__init__.py +++ b/src/transformers/models/herbert/__init__.py @@ -11,35 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
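Every refactored file now ends in an explicit `__all__`, as in the groupvit modules above, and `define_import_structure(_file)` rebuilds from those the mapping that the hand-maintained `_import_structure` dicts used to spell out. A hedged sketch of how such a scan could work, using `ast` so that no submodule is actually imported; the details here are illustrative, not the PR's implementation:

import ast
from pathlib import Path


def define_import_structure(init_file):
    """Map each sibling submodule of an __init__.py to the names its
    top-level __all__ exports, without importing anything."""
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in tree.body:
            # look for a top-level `__all__ = [...]` assignment
            if isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__"
                for target in node.targets
            ):
                structure[path.stem] = ast.literal_eval(node.value)
    return structure
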
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available - - -_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_herbert import HerbertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_herbert_fast import HerbertTokenizerFast - + from .tokenization_herbert import * + from .tokenization_herbert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/herbert/tokenization_herbert.py b/src/transformers/models/herbert/tokenization_herbert.py index bb078d4dde6d..806bf95af8d2 100644 --- a/src/transformers/models/herbert/tokenization_herbert.py +++ b/src/transformers/models/herbert/tokenization_herbert.py @@ -642,3 +642,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["HerbertTokenizer"] diff --git a/src/transformers/models/herbert/tokenization_herbert_fast.py b/src/transformers/models/herbert/tokenization_herbert_fast.py index 4cd5db58f1b9..6b2569307fe7 100644 --- a/src/transformers/models/herbert/tokenization_herbert_fast.py +++ b/src/transformers/models/herbert/tokenization_herbert_fast.py @@ -156,3 +156,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["HerbertTokenizerFast"] diff --git a/src/transformers/models/hubert/__init__.py b/src/transformers/models/hubert/__init__.py index 30331ed0d146..3aea6a4a4398 100644 --- a/src/transformers/models/hubert/__init__.py +++ b/src/transformers/models/hubert/__init__.py @@ -13,67 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_hubert": ["HubertConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_hubert"] = [ - "HubertForCTC", - "HubertForSequenceClassification", - "HubertModel", - "HubertPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_hubert"] = [ - "TFHubertForCTC", - "TFHubertModel", - "TFHubertPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_hubert import HubertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_hubert import ( - HubertForCTC, - HubertForSequenceClassification, - HubertModel, - HubertPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_hubert import ( - TFHubertForCTC, - TFHubertModel, - TFHubertPreTrainedModel, - ) - - + from .configuration_hubert import * + from .modeling_hubert import * + from .modeling_tf_hubert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/hubert/configuration_hubert.py b/src/transformers/models/hubert/configuration_hubert.py index 20977cff87d1..0855d606b60e 100644 --- a/src/transformers/models/hubert/configuration_hubert.py +++ b/src/transformers/models/hubert/configuration_hubert.py @@ -256,3 +256,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["HubertConfig"] diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index da79c2894877..ce9159635571 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -1645,3 +1645,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["HubertPreTrainedModel", "HubertModel", "HubertForCTC", "HubertForSequenceClassification"] diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index 2adfeea5b8b8..d1ec2d7c7023 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -1670,3 +1670,6 @@ def build(self, input_shape=None): if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.output_hidden_size]) + + +__all__ = ["TFHubertPreTrainedModel", "TFHubertModel", "TFHubertForCTC"] diff --git a/src/transformers/models/ibert/__init__.py b/src/transformers/models/ibert/__init__.py index 3b147e414c2e..4d040801735f 100644 --- a/src/transformers/models/ibert/__init__.py +++ b/src/transformers/models/ibert/__init__.py @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_ibert": ["IBertConfig", "IBertOnnxConfig"]} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_ibert"] = [ - "IBertForMaskedLM", - "IBertForMultipleChoice", - "IBertForQuestionAnswering", - "IBertForSequenceClassification", - "IBertForTokenClassification", - "IBertModel", - "IBertPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_ibert import IBertConfig, IBertOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_ibert import ( - IBertForMaskedLM, - IBertForMultipleChoice, - IBertForQuestionAnswering, - IBertForSequenceClassification, - IBertForTokenClassification, - IBertModel, - IBertPreTrainedModel, - ) - + from .configuration_ibert import * + from .modeling_ibert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/ibert/configuration_ibert.py b/src/transformers/models/ibert/configuration_ibert.py index 9af660669d05..6ddc344b9e10 100644 --- a/src/transformers/models/ibert/configuration_ibert.py +++ b/src/transformers/models/ibert/configuration_ibert.py @@ -137,3 +137,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["IBertConfig", "IBertOnnxConfig"] diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py index 311bb4a39fb7..03e82b475340 100644 --- a/src/transformers/models/ibert/modeling_ibert.py +++ b/src/transformers/models/ibert/modeling_ibert.py @@ -1353,3 +1353,14 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "IBertPreTrainedModel", + "IBertModel", + "IBertForMaskedLM", + "IBertForSequenceClassification", + "IBertForMultipleChoice", + "IBertForTokenClassification", + "IBertForQuestionAnswering", +] diff --git a/src/transformers/models/idefics/__init__.py b/src/transformers/models/idefics/__init__.py index 3b32064789ca..f92daea8ab8f 100644 --- a/src/transformers/models/idefics/__init__.py +++ b/src/transformers/models/idefics/__init__.py @@ -13,87 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_idefics": ["IdeficsConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_idefics"] = ["IdeficsImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_idefics"] = [ - "IdeficsForVisionText2Text", - "IdeficsModel", - "IdeficsPreTrainedModel", - ] - _import_structure["processing_idefics"] = ["IdeficsProcessor"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_idefics"] = [ - "TFIdeficsForVisionText2Text", - "TFIdeficsModel", - "TFIdeficsPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_idefics import IdeficsConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_idefics import IdeficsImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_idefics import ( - IdeficsForVisionText2Text, - IdeficsModel, - IdeficsPreTrainedModel, - ) - from .processing_idefics import IdeficsProcessor - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_idefics import ( - TFIdeficsForVisionText2Text, - TFIdeficsModel, - TFIdeficsPreTrainedModel, - ) - + from .configuration_idefics import * + from .image_processing_idefics import * + from .modeling_idefics import * + from .modeling_tf_idefics import * + from .processing_idefics import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/idefics/configuration_idefics.py b/src/transformers/models/idefics/configuration_idefics.py index 56b6025a8e89..abb049a7e9b0 100644 --- a/src/transformers/models/idefics/configuration_idefics.py +++ b/src/transformers/models/idefics/configuration_idefics.py @@ -322,3 +322,6 @@ def __init__( # updates the config object with `kwargs` from from_pretrained, so during the instantiation # of this object many attributes have default values and haven't yet been overridden. # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run. 
+ + +__all__ = ["IdeficsConfig"] diff --git a/src/transformers/models/idefics/image_processing_idefics.py b/src/transformers/models/idefics/image_processing_idefics.py index f4998020daf6..ed077df254bf 100644 --- a/src/transformers/models/idefics/image_processing_idefics.py +++ b/src/transformers/models/idefics/image_processing_idefics.py @@ -29,6 +29,7 @@ valid_images, ) from ...utils import TensorType, is_torch_available +from ...utils.import_utils import export IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073] @@ -48,6 +49,7 @@ def convert_to_rgb(image): return alpha_composite +@export(backends=("vision",)) class IdeficsImageProcessor(BaseImageProcessor): r""" Constructs a Idefics image processor. @@ -166,3 +168,6 @@ def preprocess( images = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)["pixel_values"] return images + + +__all__ = ["IdeficsImageProcessor"] diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index bd0cbc7fe86e..d13916b29654 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -28,12 +28,11 @@ from torch import nn from torch.nn import CrossEntropyLoss -from ... import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ModelOutput -from ...modeling_utils import PretrainedConfig +from ...modeling_utils import PretrainedConfig, PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import ( add_start_docstrings, @@ -1753,3 +1752,6 @@ def _reorder_cache(past, beam_idx): for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past + + +__all__ = ["IdeficsPreTrainedModel", "IdeficsModel", "IdeficsForVisionText2Text"] diff --git a/src/transformers/models/idefics/modeling_tf_idefics.py b/src/transformers/models/idefics/modeling_tf_idefics.py index c5ce2935d331..a04ff2152392 100644 --- a/src/transformers/models/idefics/modeling_tf_idefics.py +++ b/src/transformers/models/idefics/modeling_tf_idefics.py @@ -1810,3 +1810,6 @@ def build(self, input_shape=None): if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) + + +__all__ = ["TFIdeficsPreTrainedModel", "TFIdeficsMainLayer", "TFIdeficsModel", "TFIdeficsForVisionText2Text"] diff --git a/src/transformers/models/idefics/processing_idefics.py b/src/transformers/models/idefics/processing_idefics.py index 8e9e196764f9..a6f014f961d4 100644 --- a/src/transformers/models/idefics/processing_idefics.py +++ b/src/transformers/models/idefics/processing_idefics.py @@ -491,3 +491,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["IdeficsProcessor"] diff --git a/src/transformers/models/idefics2/__init__.py b/src/transformers/models/idefics2/__init__.py index 1d8d3e4b571d..b76a53a081fe 100644 --- a/src/transformers/models/idefics2/__init__.py +++ b/src/transformers/models/idefics2/__init__.py @@ -13,60 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_idefics2": ["Idefics2Config"]} - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_idefics2"] = ["Idefics2ImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_idefics2"] = [ - "Idefics2ForConditionalGeneration", - "Idefics2PreTrainedModel", - "Idefics2Model", - ] - _import_structure["processing_idefics2"] = ["Idefics2Processor"] - if TYPE_CHECKING: - from .configuration_idefics2 import Idefics2Config - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_idefics2 import Idefics2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_idefics2 import ( - Idefics2ForConditionalGeneration, - Idefics2Model, - Idefics2PreTrainedModel, - ) - from .processing_idefics2 import Idefics2Processor - - + from .configuration_idefics2 import * + from .image_processing_idefics2 import * + from .modeling_idefics2 import * + from .processing_idefics2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/idefics2/configuration_idefics2.py b/src/transformers/models/idefics2/configuration_idefics2.py index 1333895407e6..b0d3192805cb 100644 --- a/src/transformers/models/idefics2/configuration_idefics2.py +++ b/src/transformers/models/idefics2/configuration_idefics2.py @@ -260,3 +260,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings) + + +__all__ = ["Idefics2Config"] diff --git a/src/transformers/models/idefics2/image_processing_idefics2.py b/src/transformers/models/idefics2/image_processing_idefics2.py index ac9df68871ee..9995a20f70fc 100644 --- a/src/transformers/models/idefics2/image_processing_idefics2.py +++ b/src/transformers/models/idefics2/image_processing_idefics2.py @@ -35,6 +35,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -178,6 +179,7 @@ def convert_to_rgb(image: ImageInput) -> ImageInput: return alpha_composite +@export(backends=("vision",)) class Idefics2ImageProcessor(BaseImageProcessor): r""" Constructs a Idefics image processor. 
@@ -594,3 +596,6 @@ def preprocess( data["pixel_attention_mask"] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["Idefics2ImageProcessor"] diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index f57bdd27fee6..b7b9ceb718a6 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -23,11 +23,11 @@ from torch import nn from torch.nn import CrossEntropyLoss -from ... import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -1730,3 +1730,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["Idefics2PreTrainedModel", "Idefics2Model", "Idefics2ForConditionalGeneration"] diff --git a/src/transformers/models/idefics2/processing_idefics2.py b/src/transformers/models/idefics2/processing_idefics2.py index 2e14118144ba..57227696f57d 100644 --- a/src/transformers/models/idefics2/processing_idefics2.py +++ b/src/transformers/models/idefics2/processing_idefics2.py @@ -251,3 +251,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["Idefics2Processor"] diff --git a/src/transformers/models/imagegpt/__init__.py b/src/transformers/models/imagegpt/__init__.py index a64dd9affdbe..aca58f6e25f8 100644 --- a/src/transformers/models/imagegpt/__init__.py +++ b/src/transformers/models/imagegpt/__init__.py @@ -11,65 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
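One quiet fix riding along in these hunks: the old idefics and idefics2 `__init__.py` files above (and kosmos2 further down) built their `_LazyModule` without `module_spec=__spec__`, and the rewrite passes it uniformly. Forwarding the spec matters because the replacement module otherwise carries `__spec__ = None`, which breaks `importlib` introspection. Illustrative check:

import importlib.util

import transformers.models.idefics2  # triggers the sys.modules swap

# With module_spec=__spec__ forwarded, the replacement module keeps a
# usable __spec__, so this succeeds instead of raising ValueError:
spec = importlib.util.find_spec("transformers.models.idefics2")
print(spec.origin)
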
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_imagegpt": ["ImageGPTConfig", "ImageGPTOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_imagegpt"] = ["ImageGPTFeatureExtractor"] - _import_structure["image_processing_imagegpt"] = ["ImageGPTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_imagegpt"] = [ - "ImageGPTForCausalImageModeling", - "ImageGPTForImageClassification", - "ImageGPTModel", - "ImageGPTPreTrainedModel", - "load_tf_weights_in_imagegpt", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_imagegpt import ImageGPTConfig, ImageGPTOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_imagegpt import ImageGPTFeatureExtractor - from .image_processing_imagegpt import ImageGPTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_imagegpt import ( - ImageGPTForCausalImageModeling, - ImageGPTForImageClassification, - ImageGPTModel, - ImageGPTPreTrainedModel, - load_tf_weights_in_imagegpt, - ) - + from .configuration_imagegpt import * + from .feature_extraction_imagegpt import * + from .image_processing_imagegpt import * + from .modeling_imagegpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/imagegpt/configuration_imagegpt.py b/src/transformers/models/imagegpt/configuration_imagegpt.py index c54c11491cb5..bf151817bbb5 100644 --- a/src/transformers/models/imagegpt/configuration_imagegpt.py +++ b/src/transformers/models/imagegpt/configuration_imagegpt.py @@ -23,7 +23,7 @@ if TYPE_CHECKING: - from ... 
import FeatureExtractionMixin, TensorType + from ...feature_extraction_utils import FeatureExtractionMixin, TensorType logger = logging.get_logger(__name__) @@ -194,3 +194,6 @@ def generate_dummy_inputs( inputs = dict(preprocessor(images=input_image, return_tensors=framework)) return inputs + + +__all__ = ["ImageGPTConfig", "ImageGPTOnnxConfig"] diff --git a/src/transformers/models/imagegpt/feature_extraction_imagegpt.py b/src/transformers/models/imagegpt/feature_extraction_imagegpt.py index 1780926bbf24..f213e38c559a 100644 --- a/src/transformers/models/imagegpt/feature_extraction_imagegpt.py +++ b/src/transformers/models/imagegpt/feature_extraction_imagegpt.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_imagegpt import ImageGPTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ImageGPTFeatureExtractor(ImageGPTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ImageGPTFeatureExtractor"] diff --git a/src/transformers/models/imagegpt/image_processing_imagegpt.py b/src/transformers/models/imagegpt/image_processing_imagegpt.py index 47fb0f6056ed..8bd4d329bb2a 100644 --- a/src/transformers/models/imagegpt/image_processing_imagegpt.py +++ b/src/transformers/models/imagegpt/image_processing_imagegpt.py @@ -32,6 +32,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -56,6 +57,7 @@ def color_quantize(x, clusters): return np.argmin(d, axis=1) +@export(backends=("vision",)) class ImageGPTImageProcessor(BaseImageProcessor): r""" Constructs a ImageGPT image processor. This image processor can be used to resize images to a smaller resolution @@ -297,3 +299,6 @@ def preprocess( data = {"input_ids": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["ImageGPTImageProcessor"] diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index 5d59a4ed90e4..1307883a26e8 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -1202,3 +1202,12 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_imagegpt", + "ImageGPTPreTrainedModel", + "ImageGPTModel", + "ImageGPTForCausalImageModeling", + "ImageGPTForImageClassification", +] diff --git a/src/transformers/models/informer/__init__.py b/src/transformers/models/informer/__init__.py index fba309ee2b52..6d546fda3d2b 100644 --- a/src/transformers/models/informer/__init__.py +++ b/src/transformers/models/informer/__init__.py @@ -13,43 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_informer": ["InformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_informer"] = [ - "InformerForPrediction", - "InformerModel", - "InformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_informer import InformerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_informer import ( - InformerForPrediction, - InformerModel, - InformerPreTrainedModel, - ) - + from .configuration_informer import * + from .modeling_informer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/informer/configuration_informer.py b/src/transformers/models/informer/configuration_informer.py index d933ac6fd530..028f5b32295e 100644 --- a/src/transformers/models/informer/configuration_informer.py +++ b/src/transformers/models/informer/configuration_informer.py @@ -244,3 +244,6 @@ def _number_of_features(self) -> int: + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features ) + + +__all__ = ["InformerConfig"] diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index 6b5507a01559..a46babe3af08 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -2041,3 +2041,6 @@ def generate( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) ) + + +__all__ = ["InformerPreTrainedModel", "InformerModel", "InformerForPrediction"] diff --git a/src/transformers/models/instructblip/__init__.py b/src/transformers/models/instructblip/__init__.py index 093b9f00f6fc..43cd137c058f 100644 --- a/src/transformers/models/instructblip/__init__.py +++ b/src/transformers/models/instructblip/__init__.py @@ -13,53 +13,16 @@ # limitations under the License. 
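All of the deleted try/`OptionalDependencyNotAvailable` blocks encoded the same rule: only advertise a file's names when its backend imports cleanly. With backends now recorded per object, that decision can be made once, centrally, when the structure is resolved. A sketch of the gating step, with `is_backend_available` standing in for the `is_torch_available`-style predicates (assumed shape, not the PR's code):

import importlib.util


def is_backend_available(backend):
    # Stand-in for is_torch_available(), is_vision_available(), etc.
    return importlib.util.find_spec(backend) is not None


def resolve_structure(structure_by_backends):
    """Keep only entries whose declared backends are all importable.

    `structure_by_backends` maps a tuple of backend names to a
    {submodule: [exported names]} dict."""
    resolved = {}
    for backends, module_map in structure_by_backends.items():
        if all(is_backend_available(backend) for backend in backends):
            for module, names in module_map.items():
                resolved.setdefault(module, []).extend(names)
    return resolved
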
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_instructblip": [ - "InstructBlipConfig", - "InstructBlipQFormerConfig", - "InstructBlipVisionConfig", - ], - "processing_instructblip": ["InstructBlipProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_instructblip"] = [ - "InstructBlipQFormerModel", - "InstructBlipPreTrainedModel", - "InstructBlipForConditionalGeneration", - "InstructBlipVisionModel", - ] - if TYPE_CHECKING: - from .configuration_instructblip import ( - InstructBlipConfig, - InstructBlipQFormerConfig, - InstructBlipVisionConfig, - ) - from .processing_instructblip import InstructBlipProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_instructblip import ( - InstructBlipForConditionalGeneration, - InstructBlipPreTrainedModel, - InstructBlipQFormerModel, - InstructBlipVisionModel, - ) - + from .configuration_instructblip import * + from .modeling_instructblip import * + from .processing_instructblip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/instructblip/configuration_instructblip.py b/src/transformers/models/instructblip/configuration_instructblip.py index a274212a945e..bf76c9f11896 100644 --- a/src/transformers/models/instructblip/configuration_instructblip.py +++ b/src/transformers/models/instructblip/configuration_instructblip.py @@ -366,3 +366,6 @@ def from_vision_qformer_text_configs( text_config=text_config.to_dict(), **kwargs, ) + + +__all__ = ["InstructBlipVisionConfig", "InstructBlipQFormerConfig", "InstructBlipConfig"] diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index ba77afe9f7c2..c318295610fe 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -1652,3 +1652,11 @@ def generate( outputs = torch.cat([bos_tokens, outputs], dim=-1) return outputs + + +__all__ = [ + "InstructBlipPreTrainedModel", + "InstructBlipVisionModel", + "InstructBlipQFormerModel", + "InstructBlipForConditionalGeneration", +] diff --git a/src/transformers/models/instructblip/processing_instructblip.py b/src/transformers/models/instructblip/processing_instructblip.py index e3251395a781..56130af48c93 100644 --- a/src/transformers/models/instructblip/processing_instructblip.py +++ b/src/transformers/models/instructblip/processing_instructblip.py @@ -227,3 +227,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer") processor.qformer_tokenizer = qformer_tokenizer return processor + + +__all__ = ["InstructBlipProcessor"] diff --git a/src/transformers/models/jamba/__init__.py b/src/transformers/models/jamba/__init__.py index f6b7c2137b20..8789007ad0bc 100644 --- 
a/src/transformers/models/jamba/__init__.py +++ b/src/transformers/models/jamba/__init__.py @@ -13,46 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_jamba": ["JambaConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_jamba"] = [ - "JambaForCausalLM", - "JambaForSequenceClassification", - "JambaModel", - "JambaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_jamba import JambaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_jamba import ( - JambaForCausalLM, - JambaForSequenceClassification, - JambaModel, - JambaPreTrainedModel, - ) - - + from .configuration_jamba import * + from .modeling_jamba import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/jamba/configuration_jamba.py b/src/transformers/models/jamba/configuration_jamba.py index 58c8a685feab..92cec746f46c 100644 --- a/src/transformers/models/jamba/configuration_jamba.py +++ b/src/transformers/models/jamba/configuration_jamba.py @@ -222,3 +222,6 @@ def layers_num_experts(self): self.num_experts if i % self.expert_layer_period == self.expert_layer_offset else 1 for i in range(self.num_hidden_layers) ] + + +__all__ = ["JambaConfig"] diff --git a/src/transformers/models/jamba/modeling_jamba.py b/src/transformers/models/jamba/modeling_jamba.py index 60e1670a3c27..d9dd8d57ef4d 100755 --- a/src/transformers/models/jamba/modeling_jamba.py +++ b/src/transformers/models/jamba/modeling_jamba.py @@ -1216,6 +1216,7 @@ def _init_weights(self, module): JAMBA_START_DOCSTRING, ) # Adapted from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->JAMBA, Mistral->Jamba + class JambaModel(JambaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`JambaDecoderLayer`] @@ -1424,6 +1425,8 @@ def _update_mamba_mask(self, attention_mask, cache_position): # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->Jamba + + class JambaForCausalLM(JambaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] @@ -1761,3 +1764,6 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = ["JambaPreTrainedModel", "JambaModel", "JambaForCausalLM", "JambaForSequenceClassification"] diff --git a/src/transformers/models/kosmos2/__init__.py b/src/transformers/models/kosmos2/__init__.py index 171a5cc7071e..6f1927670328 100644 --- a/src/transformers/models/kosmos2/__init__.py +++ b/src/transformers/models/kosmos2/__init__.py @@ -14,49 +14,16 @@ # limitations under the License. 
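For callers nothing changes: attribute access through the package still triggers the lazy import, and the eager `TYPE_CHECKING` branch keeps static analysis working. For example, this resolves exactly as before (illustrative usage):

from transformers.models.kosmos2 import Kosmos2Config  # resolved lazily on first access

config = Kosmos2Config()
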
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_kosmos2": ["Kosmos2Config"], - "processing_kosmos2": ["Kosmos2Processor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_kosmos2"] = [ - "Kosmos2ForConditionalGeneration", - "Kosmos2Model", - "Kosmos2PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_kosmos2 import Kosmos2Config - from .processing_kosmos2 import Kosmos2Processor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_kosmos2 import ( - Kosmos2ForConditionalGeneration, - Kosmos2Model, - Kosmos2PreTrainedModel, - ) - + from .configuration_kosmos2 import * + from .modeling_kosmos2 import * + from .processing_kosmos2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/kosmos2/configuration_kosmos2.py b/src/transformers/models/kosmos2/configuration_kosmos2.py index e49074f8061b..50a09d3b5b1e 100644 --- a/src/transformers/models/kosmos2/configuration_kosmos2.py +++ b/src/transformers/models/kosmos2/configuration_kosmos2.py @@ -290,3 +290,6 @@ def __init__( self.vision_config = Kosmos2VisionConfig(**vision_config) self.latent_query_num = latent_query_num + + +__all__ = ["Kosmos2Config"] diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 69641790b2db..90728e18e9a3 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -2048,3 +2048,6 @@ def generate( ) return output + + +__all__ = ["Kosmos2PreTrainedModel", "Kosmos2Model", "Kosmos2ForConditionalGeneration"] diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py index 7f54ac3b44bd..29d141e4f462 100644 --- a/src/transformers/models/kosmos2/processing_kosmos2.py +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -665,3 +665,6 @@ def clean_text_and_extract_entities_with_bboxes(text, num_patches_per_side=32): entities.append(adjusted_entity + (bboxes_in_coords,)) return _cleanup_spaces(processed_text, entities) + + +__all__ = ["Kosmos2Processor"] diff --git a/src/transformers/models/layoutlm/__init__.py b/src/transformers/models/layoutlm/__init__.py index 070b42368ef9..f6bbf21918cc 100644 --- a/src/transformers/models/layoutlm/__init__.py +++ b/src/transformers/models/layoutlm/__init__.py @@ -11,106 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_layoutlm": ["LayoutLMConfig", "LayoutLMOnnxConfig"], - "tokenization_layoutlm": ["LayoutLMTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlm_fast"] = ["LayoutLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlm"] = [ - "LayoutLMForMaskedLM", - "LayoutLMForSequenceClassification", - "LayoutLMForTokenClassification", - "LayoutLMForQuestionAnswering", - "LayoutLMModel", - "LayoutLMPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_layoutlm"] = [ - "TFLayoutLMForMaskedLM", - "TFLayoutLMForSequenceClassification", - "TFLayoutLMForTokenClassification", - "TFLayoutLMForQuestionAnswering", - "TFLayoutLMMainLayer", - "TFLayoutLMModel", - "TFLayoutLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_layoutlm import LayoutLMConfig, LayoutLMOnnxConfig - from .tokenization_layoutlm import LayoutLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlm_fast import LayoutLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlm import ( - LayoutLMForMaskedLM, - LayoutLMForQuestionAnswering, - LayoutLMForSequenceClassification, - LayoutLMForTokenClassification, - LayoutLMModel, - LayoutLMPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_layoutlm import ( - TFLayoutLMForMaskedLM, - TFLayoutLMForQuestionAnswering, - TFLayoutLMForSequenceClassification, - TFLayoutLMForTokenClassification, - TFLayoutLMMainLayer, - TFLayoutLMModel, - TFLayoutLMPreTrainedModel, - ) - + from .configuration_layoutlm import * + from .modeling_layoutlm import * + from .modeling_tf_layoutlm import * + from .tokenization_layoutlm import * + from .tokenization_layoutlm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py index 4198bb26e979..efed250428bb 100644 --- a/src/transformers/models/layoutlm/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -17,8 +17,9 @@ from collections import OrderedDict from typing import Any, List, Mapping, Optional -from ... 
import PretrainedConfig, PreTrainedTokenizer +from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, PatchingSpec +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -194,3 +195,6 @@ def generate_dummy_inputs( batch_size, seq_length = input_dict["input_ids"].shape input_dict["bbox"] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1) return input_dict + + +__all__ = ["LayoutLMConfig", "LayoutLMOnnxConfig"] diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 55e17bfc586d..60c0f916ec32 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -1373,3 +1373,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMPreTrainedModel", + "LayoutLMModel", + "LayoutLMForMaskedLM", + "LayoutLMForSequenceClassification", + "LayoutLMForTokenClassification", + "LayoutLMForQuestionAnswering", +] diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 59aebe15b5d5..ea121233ec8f 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -1679,3 +1679,14 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFLayoutLMPreTrainedModel", + "TFLayoutLMModel", + "TFLayoutLMForMaskedLM", + "TFLayoutLMForSequenceClassification", + "TFLayoutLMForTokenClassification", + "TFLayoutLMForQuestionAnswering", + "TFLayoutLMMainLayer", +] diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm.py b/src/transformers/models/layoutlm/tokenization_layoutlm.py index b0a57dac5fda..3927ed01e087 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm.py @@ -502,3 +502,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LayoutLMTokenizer"] diff --git a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py index db1409dfcab1..1df94cb625a3 100644 --- a/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py @@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["LayoutLMTokenizerFast"] diff --git a/src/transformers/models/layoutlmv2/__init__.py b/src/transformers/models/layoutlmv2/__init__.py index 1c45a9f76abb..ec6213b3bff0 100644 --- a/src/transformers/models/layoutlmv2/__init__.py +++ b/src/transformers/models/layoutlmv2/__init__.py @@ -11,92 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
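The `__all__` lists added to each layoutlm file above are not just documentation: under `TYPE_CHECKING` the new `__init__.py` does `from .modeling_layoutlm import *`, and a star-import binds exactly the names listed in `__all__`. A self-contained demonstration (toy module names, nothing from transformers):

import sys
import types

# Build a throwaway module with an explicit __all__, mirroring the files above.
toy = types.ModuleType("toy_modeling")
exec(
    "__all__ = ['PublicModel']\n"
    "class PublicModel: pass\n"
    "class InternalLayer: pass\n",
    toy.__dict__,
)
sys.modules["toy_modeling"] = toy

namespace = {}
exec("from toy_modeling import *", namespace)
assert "PublicModel" in namespace
assert "InternalLayer" not in namespace  # omitted from __all__, so not exported

This is the same mechanism that keeps internal classes such as the old `LayoutLMv2Layer` out of the public surface once they are dropped from `__all__`.
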
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_layoutlmv2": ["LayoutLMv2Config"], - "processing_layoutlmv2": ["LayoutLMv2Processor"], - "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"] - _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlmv2"] = [ - "LayoutLMv2ForQuestionAnswering", - "LayoutLMv2ForSequenceClassification", - "LayoutLMv2ForTokenClassification", - "LayoutLMv2Layer", - "LayoutLMv2Model", - "LayoutLMv2PreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_layoutlmv2 import LayoutLMv2Config - from .processing_layoutlmv2 import LayoutLMv2Processor - from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlmv2 import ( - LayoutLMv2ForQuestionAnswering, - LayoutLMv2ForSequenceClassification, - LayoutLMv2ForTokenClassification, - LayoutLMv2Layer, - LayoutLMv2Model, - LayoutLMv2PreTrainedModel, - ) + from .configuration_layoutlmv2 import * + from .feature_extraction_layoutlmv2 import * + from .image_processing_layoutlmv2 import * + from .modeling_layoutlmv2 import * + from .processing_layoutlmv2 import * + from .tokenization_layoutlmv2 import * + from .tokenization_layoutlmv2_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py index d2a9d37bd12a..6ffdec66385c 100644 --- a/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/configuration_layoutlmv2.py @@ -217,3 +217,6 @@ def get_detectron2_config(self): setattr(to_set, attributes[-1], v) return detectron2_config + + +__all__ = ["LayoutLMv2Config"] diff --git a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py index 
eb1042b7c284..0db0df31e934 100644 --- a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py @@ -19,12 +19,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -33,3 +35,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LayoutLMv2FeatureExtractor"] diff --git a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py index c47d58c30c01..22c83c566696 100644 --- a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py @@ -38,6 +38,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_vision_available(): @@ -98,6 +99,7 @@ def apply_tesseract( return words, normalized_boxes +@export(backends=("vision",)) class LayoutLMv2ImageProcessor(BaseImageProcessor): r""" Constructs a LayoutLMv2 image processor. @@ -296,3 +298,6 @@ def preprocess( data["words"] = words_batch data["boxes"] = boxes_batch return data + + +__all__ = ["LayoutLMv2ImageProcessor"] diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index 50ef27be3f52..da43dcd213ca 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -1415,3 +1415,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMv2PreTrainedModel", + "LayoutLMv2Model", + "LayoutLMv2ForSequenceClassification", + "LayoutLMv2ForTokenClassification", + "LayoutLMv2ForQuestionAnswering", +] diff --git a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py index 1edf87465bbf..39d34b3a9975 100644 --- a/src/transformers/models/layoutlmv2/processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/processing_layoutlmv2.py @@ -199,3 +199,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["LayoutLMv2Processor"] diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py index fe0305562374..f0ac2e53869a 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py @@ -1540,3 +1540,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LayoutLMv2Tokenizer"] diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py index aa2bf6b3226b..e6fb2ac0e32d 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py @@ -791,3 +791,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, 
name=filename_prefix) return tuple(files) + + +__all__ = ["LayoutLMv2TokenizerFast"] diff --git a/src/transformers/models/layoutlmv3/__init__.py b/src/transformers/models/layoutlmv3/__init__.py index a8ef90906e7a..0fb171e690ff 100644 --- a/src/transformers/models/layoutlmv3/__init__.py +++ b/src/transformers/models/layoutlmv3/__init__.py @@ -11,128 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_layoutlmv3": [ - "LayoutLMv3Config", - "LayoutLMv3OnnxConfig", - ], - "processing_layoutlmv3": ["LayoutLMv3Processor"], - "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_layoutlmv3"] = [ - "LayoutLMv3ForQuestionAnswering", - "LayoutLMv3ForSequenceClassification", - "LayoutLMv3ForTokenClassification", - "LayoutLMv3Model", - "LayoutLMv3PreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_layoutlmv3"] = [ - "TFLayoutLMv3ForQuestionAnswering", - "TFLayoutLMv3ForSequenceClassification", - "TFLayoutLMv3ForTokenClassification", - "TFLayoutLMv3Model", - "TFLayoutLMv3PreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"] - _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_layoutlmv3 import ( - LayoutLMv3Config, - LayoutLMv3OnnxConfig, - ) - from .processing_layoutlmv3 import LayoutLMv3Processor - from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_layoutlmv3 import ( - LayoutLMv3ForQuestionAnswering, - LayoutLMv3ForSequenceClassification, - LayoutLMv3ForTokenClassification, - LayoutLMv3Model, - LayoutLMv3PreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_layoutlmv3 import ( - TFLayoutLMv3ForQuestionAnswering, - TFLayoutLMv3ForSequenceClassification, - TFLayoutLMv3ForTokenClassification, - TFLayoutLMv3Model, - TFLayoutLMv3PreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from 
.feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor - from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor - + from .configuration_layoutlmv3 import * + from .feature_extraction_layoutlmv3 import * + from .image_processing_layoutlmv3 import * + from .modeling_layoutlmv3 import * + from .modeling_tf_layoutlmv3 import * + from .processing_layoutlmv3 import * + from .tokenization_layoutlmv3 import * + from .tokenization_layoutlmv3_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index aa50a3228e86..a13d7fa7cded 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -288,3 +288,6 @@ def generate_dummy_inputs( ) return inputs + + +__all__ = ["LayoutLMv3Config", "LayoutLMv3OnnxConfig"] diff --git a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py index e120a0ebd07a..956305b43ecb 100644 --- a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py @@ -19,12 +19,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -33,3 +35,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LayoutLMv3FeatureExtractor"] diff --git a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py index 6f16435c14dd..1e4bae9f36c2 100644 --- a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py @@ -41,6 +41,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_vision_available(): @@ -100,6 +101,7 @@ def apply_tesseract( return words, normalized_boxes +@export(backends=("vision",)) class LayoutLMv3ImageProcessor(BaseImageProcessor): r""" Constructs a LayoutLMv3 image processor. 
@@ -372,3 +374,6 @@ def preprocess( data["words"] = words_batch data["boxes"] = boxes_batch return data + + +__all__ = ["LayoutLMv3ImageProcessor"] diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index 629490350c7d..f93838412f16 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -1382,3 +1382,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LayoutLMv3PreTrainedModel", + "LayoutLMv3Model", + "LayoutLMv3ForTokenClassification", + "LayoutLMv3ForQuestionAnswering", + "LayoutLMv3ForSequenceClassification", +] diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 574e14cc9108..bf1fef406539 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -1772,3 +1772,13 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build(None) + + +__all__ = [ + "TFLayoutLMv3PreTrainedModel", + "TFLayoutLMv3Model", + "TFLayoutLMv3ForSequenceClassification", + "TFLayoutLMv3ForTokenClassification", + "TFLayoutLMv3ForQuestionAnswering", + "TFLayoutLMv3MainLayer", +] diff --git a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py index 369bd51bec28..4bd9955775d1 100644 --- a/src/transformers/models/layoutlmv3/processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/processing_layoutlmv3.py @@ -197,3 +197,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["LayoutLMv3Processor"] diff --git a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py index 89f899f22f4e..92a24f44f40b 100644 --- a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py @@ -1459,3 +1459,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LayoutLMv3Tokenizer"] diff --git a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py index 07bedf36133a..d8f158549367 100644 --- a/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py +++ b/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py @@ -835,3 +835,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["LayoutLMv3TokenizerFast"] diff --git a/src/transformers/models/layoutxlm/__init__.py b/src/transformers/models/layoutxlm/__init__.py index e3885d381f9c..d7f9717e1420 100644 --- a/src/transformers/models/layoutxlm/__init__.py +++ b/src/transformers/models/layoutxlm/__init__.py @@ -11,57 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
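The `@export(backends=("vision",))` markers appearing on the feature extractors and image processors above tell `define_import_structure` which optional backend guards an object, replacing the old `is_vision_available()` try/except blocks. Roughly, the decorator has the following shape (the attribute name here is an assumption for illustration; see `utils/import_utils.py` for the real one):

def export(*, backends=()):
    """Tag an object with the optional backends it needs; otherwise a no-op."""
    def inner(obj):
        obj._required_backends = tuple(backends)  # hypothetical attribute name
        return obj
    return inner


@export(backends=("vision",))
class ToyImageProcessor:
    pass


assert ToyImageProcessor._required_backends == ("vision",)
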
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: - from .processing_layoutxlm import LayoutXLMProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutxlm import LayoutXLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast - + from .processing_layoutxlm import * + from .tokenization_layoutxlm import * + from .tokenization_layoutxlm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/layoutxlm/processing_layoutxlm.py b/src/transformers/models/layoutxlm/processing_layoutxlm.py index 1cbd3f20c2fa..a8881c634c24 100644 --- a/src/transformers/models/layoutxlm/processing_layoutxlm.py +++ b/src/transformers/models/layoutxlm/processing_layoutxlm.py @@ -199,3 +199,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["LayoutXLMProcessor"] diff --git a/src/transformers/models/layoutxlm/tokenization_layoutxlm.py b/src/transformers/models/layoutxlm/tokenization_layoutxlm.py index 3ab57ac892aa..e9ed0d7051dc 100644 --- a/src/transformers/models/layoutxlm/tokenization_layoutxlm.py +++ b/src/transformers/models/layoutxlm/tokenization_layoutxlm.py @@ -30,6 +30,7 @@ TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging +from ...utils.import_utils import export from ..xlm_roberta.tokenization_xlm_roberta import ( SPIECE_UNDERLINE, VOCAB_FILES_NAMES, @@ -143,6 +144,7 @@ """ +@export(backends=("sentencepiece",)) class LayoutXLMTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. 
Based on @@ -1167,3 +1169,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LayoutXLMTokenizer"] diff --git a/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py b/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py index 6d68cb9f18e7..758f9df0b816 100644 --- a/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py +++ b/src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py @@ -802,3 +802,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["LayoutXLMTokenizerFast"] diff --git a/src/transformers/models/led/__init__.py b/src/transformers/models/led/__init__.py index 2dbd59dcc347..e0e52fe486da 100644 --- a/src/transformers/models/led/__init__.py +++ b/src/transformers/models/led/__init__.py @@ -13,87 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_led": ["LEDConfig"], - "tokenization_led": ["LEDTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_led_fast"] = ["LEDTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_led"] = [ - "LEDForConditionalGeneration", - "LEDForQuestionAnswering", - "LEDForSequenceClassification", - "LEDModel", - "LEDPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_led"] = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_led import LEDConfig - from .tokenization_led import LEDTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_led_fast import LEDTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_led import ( - LEDForConditionalGeneration, - LEDForQuestionAnswering, - LEDForSequenceClassification, - LEDModel, - LEDPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel - + from .configuration_led import * + from .modeling_led import * + from .modeling_tf_led import * + from .tokenization_led import * + from .tokenization_led_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/led/configuration_led.py b/src/transformers/models/led/configuration_led.py index 9ed3b148c739..d51c0dc4aa32 100644 
--- a/src/transformers/models/led/configuration_led.py +++ b/src/transformers/models/led/configuration_led.py @@ -160,3 +160,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["LEDConfig"] diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 41b6c0a2bea2..27fca63a26d0 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -2741,3 +2741,12 @@ def forward( encoder_attentions=outputs.encoder_attentions, encoder_global_attentions=outputs.encoder_global_attentions, ) + + +__all__ = [ + "LEDPreTrainedModel", + "LEDModel", + "LEDForConditionalGeneration", + "LEDForSequenceClassification", + "LEDForQuestionAnswering", +] diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 8c414648d69e..cfec7fb4de4a 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -2661,3 +2661,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFLEDPreTrainedModel", "TFLEDModel", "TFLEDForConditionalGeneration", "TFLEDMainLayer"] diff --git a/src/transformers/models/led/tokenization_led.py b/src/transformers/models/led/tokenization_led.py index aaf09e6d149e..8f61378f5ecc 100644 --- a/src/transformers/models/led/tokenization_led.py +++ b/src/transformers/models/led/tokenization_led.py @@ -447,3 +447,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LEDTokenizer"] diff --git a/src/transformers/models/led/tokenization_led_fast.py b/src/transformers/models/led/tokenization_led_fast.py index ca15eb997bed..de186499a9bb 100644 --- a/src/transformers/models/led/tokenization_led_fast.py +++ b/src/transformers/models/led/tokenization_led_fast.py @@ -323,3 +323,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["LEDTokenizerFast"] diff --git a/src/transformers/models/levit/__init__.py b/src/transformers/models/levit/__init__.py index 266889963c90..609d9d86c30c 100644 --- a/src/transformers/models/levit/__init__.py +++ b/src/transformers/models/levit/__init__.py @@ -13,59 +13,17 @@ # limitations under the License. 
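Every refactored `__init__.py` ends the same way: it replaces itself in `sys.modules` with a `_LazyModule` built from the derived structure, so nothing heavy is imported until an attribute is first touched. A toy version of that runtime contract (simplified; the real class also threads through `module_spec` and backend availability checks):

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Toy lazy module: resolve public names on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: {names}} into {name: submodule} for direct lookup.
        self._name_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = sorted(self._name_to_module)

    def __getattr__(self, item):
        submodule = self._name_to_module.get(item)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        value = getattr(importlib.import_module(f".{submodule}", self.__name__), item)
        setattr(self, item, value)  # cache so later lookups skip __getattr__
        return value
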
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_levit": ["LevitConfig", "LevitOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_levit"] = ["LevitFeatureExtractor"] - _import_structure["image_processing_levit"] = ["LevitImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_levit"] = [ - "LevitForImageClassification", - "LevitForImageClassificationWithTeacher", - "LevitModel", - "LevitPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_levit import LevitConfig, LevitOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_levit import LevitFeatureExtractor - from .image_processing_levit import LevitImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_levit import ( - LevitForImageClassification, - LevitForImageClassificationWithTeacher, - LevitModel, - LevitPreTrainedModel, - ) + from .configuration_levit import * + from .feature_extraction_levit import * + from .image_processing_levit import * + from .modeling_levit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index 5b049309594c..b15cc11226aa 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -139,3 +139,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["LevitConfig", "LevitOnnxConfig"] diff --git a/src/transformers/models/levit/feature_extraction_levit.py b/src/transformers/models/levit/feature_extraction_levit.py index 91308cf0ba18..2b63cd16baa5 100644 --- a/src/transformers/models/levit/feature_extraction_levit.py +++ b/src/transformers/models/levit/feature_extraction_levit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_levit import LevitImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class LevitFeatureExtractor(LevitImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["LevitFeatureExtractor"] diff --git a/src/transformers/models/levit/image_processing_levit.py b/src/transformers/models/levit/image_processing_levit.py index fad47ee02736..3f0c92a886b7 100644 --- a/src/transformers/models/levit/image_processing_levit.py +++ b/src/transformers/models/levit/image_processing_levit.py @@ -38,11 +38,13 @@ validate_preprocess_arguments, ) from 
...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class LevitImageProcessor(BaseImageProcessor): r""" Constructs a LeViT image processor. @@ -304,3 +306,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["LevitImageProcessor"] diff --git a/src/transformers/models/levit/modeling_levit.py b/src/transformers/models/levit/modeling_levit.py index af202787a166..68421359e884 100644 --- a/src/transformers/models/levit/modeling_levit.py +++ b/src/transformers/models/levit/modeling_levit.py @@ -733,3 +733,11 @@ def forward( distillation_logits=distill_logits, hidden_states=outputs.hidden_states, ) + + +__all__ = [ + "LevitPreTrainedModel", + "LevitModel", + "LevitForImageClassification", + "LevitForImageClassificationWithTeacher", +] diff --git a/src/transformers/models/lilt/__init__.py b/src/transformers/models/lilt/__init__.py index 5b73f3aebd9c..94f4f953b7c5 100644 --- a/src/transformers/models/lilt/__init__.py +++ b/src/transformers/models/lilt/__init__.py @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_lilt": ["LiltConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_lilt"] = [ - "LiltForQuestionAnswering", - "LiltForSequenceClassification", - "LiltForTokenClassification", - "LiltModel", - "LiltPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_lilt import LiltConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_lilt import ( - LiltForQuestionAnswering, - LiltForSequenceClassification, - LiltForTokenClassification, - LiltModel, - LiltPreTrainedModel, - ) - + from .configuration_lilt import * + from .modeling_lilt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/lilt/configuration_lilt.py b/src/transformers/models/lilt/configuration_lilt.py index 57ab8884ed4d..30f32cca4a56 100644 --- a/src/transformers/models/lilt/configuration_lilt.py +++ b/src/transformers/models/lilt/configuration_lilt.py @@ -126,3 +126,6 @@ def __init__( self.classifier_dropout = classifier_dropout self.channel_shrink_ratio = channel_shrink_ratio self.max_2d_position_embeddings = max_2d_position_embeddings + + +__all__ = ["LiltConfig"] diff --git a/src/transformers/models/lilt/modeling_lilt.py b/src/transformers/models/lilt/modeling_lilt.py index 85cbcfdc4c45..2db6d9b845bb 100644 --- a/src/transformers/models/lilt/modeling_lilt.py +++ b/src/transformers/models/lilt/modeling_lilt.py @@ -1181,3 +1181,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + 
"LiltPreTrainedModel", + "LiltModel", + "LiltForSequenceClassification", + "LiltForTokenClassification", + "LiltForQuestionAnswering", +] diff --git a/src/transformers/models/llama/__init__.py b/src/transformers/models/llama/__init__.py index 3f6461c4c093..e060384fcdd3 100644 --- a/src/transformers/models/llama/__init__.py +++ b/src/transformers/models/llama/__init__.py @@ -13,104 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_llama": ["LlamaConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_llama"] = ["LlamaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llama"] = [ - "LlamaForCausalLM", - "LlamaModel", - "LlamaPreTrainedModel", - "LlamaForSequenceClassification", - "LlamaForQuestionAnswering", - "LlamaForTokenClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llama import LlamaConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_llama import LlamaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_llama_fast import LlamaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llama import ( - LlamaForCausalLM, - LlamaForQuestionAnswering, - LlamaForSequenceClassification, - LlamaForTokenClassification, - LlamaModel, - LlamaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel - - + from .configuration_llama import * + from .modeling_flax_llama import * + from .modeling_llama import * + from .tokenization_llama import * + from .tokenization_llama_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llama/configuration_llama.py b/src/transformers/models/llama/configuration_llama.py index 435f0091e06e..c7e410f1df53 100644 --- a/src/transformers/models/llama/configuration_llama.py +++ 
b/src/transformers/models/llama/configuration_llama.py @@ -204,3 +204,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["LlamaConfig"] diff --git a/src/transformers/models/llama/modeling_flax_llama.py b/src/transformers/models/llama/modeling_flax_llama.py index 26a2c2bb09a3..48fc76f42d1c 100644 --- a/src/transformers/models/llama/modeling_flax_llama.py +++ b/src/transformers/models/llama/modeling_flax_llama.py @@ -742,3 +742,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) + +__all__ = ["FlaxLlamaPreTrainedModel", "FlaxLlamaModel", "FlaxLlamaForCausalLM"] diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 5e39c4ebbf21..caa7854733f5 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -1615,3 +1615,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LlamaPreTrainedModel", + "LlamaModel", + "LlamaForCausalLM", + "LlamaForSequenceClassification", + "LlamaForQuestionAnswering", + "LlamaForTokenClassification", +] diff --git a/src/transformers/models/llama/tokenization_llama.py b/src/transformers/models/llama/tokenization_llama.py index cc03c1470ee2..01443441ab3a 100644 --- a/src/transformers/models/llama/tokenization_llama.py +++ b/src/transformers/models/llama/tokenization_llama.py @@ -29,6 +29,7 @@ from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export if TYPE_CHECKING: @@ -53,6 +54,7 @@ # fmt: on +@export(backends=("sentencepiece",)) class LlamaTokenizer(PreTrainedTokenizer): """ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is diff --git a/src/transformers/models/llama/tokenization_llama_fast.py b/src/transformers/models/llama/tokenization_llama_fast.py index 67e339b4290a..cb8b742ed41b 100644 --- a/src/transformers/models/llama/tokenization_llama_fast.py +++ b/src/transformers/models/llama/tokenization_llama_fast.py @@ -253,3 +253,6 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = output + bos_token_id + token_ids_1 + eos_token_id return output + + +__all__ = ["LlamaTokenizerFast"] diff --git a/src/transformers/models/llava/__init__.py b/src/transformers/models/llava/__init__.py index 3dabdc1f678f..999852ac3050 100644 --- a/src/transformers/models/llava/__init__.py +++ b/src/transformers/models/llava/__init__.py @@ -13,43 +13,16 @@ # limitations under the License. 
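Combining the two mechanisms above, the derived structure can be thought of as keyed by backend requirements, with the loader surfacing only the groups whose requirements are met. The shape below is a plausible sketch, not the documented return type of `define_import_structure`:

# Hypothetical shape: {required backends -> {submodule -> exported names}}.
structure = {
    frozenset(): {"configuration_llava": {"LlavaConfig"}},
    frozenset({"torch"}): {
        "modeling_llava": {"LlavaPreTrainedModel", "LlavaForConditionalGeneration"}
    },
}


def backend_available(backend):
    # Stand-in for is_torch_available() and friends.
    return backend == "torch"


visible = {
    submodule: names
    for requirements, modules in structure.items()
    if all(backend_available(b) for b in requirements)
    for submodule, names in modules.items()
}
assert visible == {
    "configuration_llava": {"LlavaConfig"},
    "modeling_llava": {"LlavaPreTrainedModel", "LlavaForConditionalGeneration"},
}
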
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_llava": ["LlavaConfig"], - "processing_llava": ["LlavaProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llava"] = [ - "LlavaForConditionalGeneration", - "LlavaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llava import LlavaConfig - from .processing_llava import LlavaProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llava import ( - LlavaForConditionalGeneration, - LlavaPreTrainedModel, - ) - + from .configuration_llava import * + from .modeling_llava import * + from .processing_llava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llava/configuration_llava.py b/src/transformers/models/llava/configuration_llava.py index f2338a7c5a5d..79d20f47399a 100644 --- a/src/transformers/models/llava/configuration_llava.py +++ b/src/transformers/models/llava/configuration_llava.py @@ -129,3 +129,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["LlavaConfig"] diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index ae53156d9ba2..61c5f5af2f95 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -21,9 +21,9 @@ import torch.utils.checkpoint from torch import nn -from ... import PreTrainedModel from ...activations import ACT2FN from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -593,3 +593,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values"] = pixel_values return model_inputs + + +__all__ = ["LlavaPreTrainedModel", "LlavaForConditionalGeneration"] diff --git a/src/transformers/models/llava/processing_llava.py b/src/transformers/models/llava/processing_llava.py index 99244d993b71..bc053e9428df 100644 --- a/src/transformers/models/llava/processing_llava.py +++ b/src/transformers/models/llava/processing_llava.py @@ -189,3 +189,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["LlavaProcessor"] diff --git a/src/transformers/models/llava_next/__init__.py b/src/transformers/models/llava_next/__init__.py index 0fb2ff2b6f28..028cc73be407 100644 --- a/src/transformers/models/llava_next/__init__.py +++ b/src/transformers/models/llava_next/__init__.py @@ -13,60 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_llava_next": ["LlavaNextConfig"], - "processing_llava_next": ["LlavaNextProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_llava_next"] = [ - "LlavaNextForConditionalGeneration", - "LlavaNextPreTrainedModel", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_llava_next"] = ["LlavaNextImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_llava_next import LlavaNextConfig - from .processing_llava_next import LlavaNextProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_llava_next import ( - LlavaNextForConditionalGeneration, - LlavaNextPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_llava_next import LlavaNextImageProcessor - - + from .configuration_llava_next import * + from .image_processing_llava_next import * + from .modeling_llava_next import * + from .processing_llava_next import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/llava_next/configuration_llava_next.py b/src/transformers/models/llava_next/configuration_llava_next.py index e8768dde8572..9f66f432dd15 100644 --- a/src/transformers/models/llava_next/configuration_llava_next.py +++ b/src/transformers/models/llava_next/configuration_llava_next.py @@ -142,3 +142,6 @@ def __init__( self.text_config = text_config super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +__all__ = ["LlavaNextConfig"] diff --git a/src/transformers/models/llava_next/image_processing_llava_next.py b/src/transformers/models/llava_next/image_processing_llava_next.py index 579e6d44c143..b975fb0f0aca 100644 --- a/src/transformers/models/llava_next/image_processing_llava_next.py +++ b/src/transformers/models/llava_next/image_processing_llava_next.py @@ -44,6 +44,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -139,6 +140,7 @@ def _get_patch_output_size(image, target_resolution, input_data_format): return new_height, new_width +@export(backends=("vision",)) class LlavaNextImageProcessor(BaseImageProcessor): r""" Constructs a LLaVa-NeXT image processor. 
Based on [`CLIPImageProcessor`] with incorporation of additional techniques @@ -747,3 +749,6 @@ def preprocess( return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors ) + + +__all__ = ["LlavaNextImageProcessor"] diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index 5fe029f13e73..7518251f47e0 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -23,10 +23,10 @@ import torch.utils.checkpoint from torch import nn -from ... import PreTrainedModel from ...activations import ACT2FN from ...image_processing_utils import select_best_resolution from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -966,3 +966,6 @@ def prepare_inputs_for_generation( model_inputs["image_sizes"] = image_sizes return model_inputs + + +__all__ = ["LlavaNextPreTrainedModel", "LlavaNextForConditionalGeneration"] diff --git a/src/transformers/models/llava_next/processing_llava_next.py b/src/transformers/models/llava_next/processing_llava_next.py index f84578d1f346..1570fe2174de 100644 --- a/src/transformers/models/llava_next/processing_llava_next.py +++ b/src/transformers/models/llava_next/processing_llava_next.py @@ -239,3 +239,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["LlavaNextProcessor"] diff --git a/src/transformers/models/longformer/__init__.py b/src/transformers/models/longformer/__init__.py index ddbd8a68ecc6..a95cf7e57ddb 100644 --- a/src/transformers/models/longformer/__init__.py +++ b/src/transformers/models/longformer/__init__.py @@ -11,119 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
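From the consumer side none of this changes the public API; it only changes when submodules load. A quick check one might run against a checkout of this branch (hedged: the exact contents of sys.modules depend on what else has been imported first):

import sys

from transformers.models import longformer

# First attribute access routes through the lazy module and performs the import.
config_cls = longformer.LongformerConfig
assert config_cls.__name__ == "LongformerConfig"
assert "transformers.models.longformer.configuration_longformer" in sys.modules
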
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_longformer": [ - "LongformerConfig", - "LongformerOnnxConfig", - ], - "tokenization_longformer": ["LongformerTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_longformer"] = [ - "LongformerForMaskedLM", - "LongformerForMultipleChoice", - "LongformerForQuestionAnswering", - "LongformerForSequenceClassification", - "LongformerForTokenClassification", - "LongformerModel", - "LongformerPreTrainedModel", - "LongformerSelfAttention", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_longformer"] = [ - "TFLongformerForMaskedLM", - "TFLongformerForMultipleChoice", - "TFLongformerForQuestionAnswering", - "TFLongformerForSequenceClassification", - "TFLongformerForTokenClassification", - "TFLongformerModel", - "TFLongformerPreTrainedModel", - "TFLongformerSelfAttention", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_longformer import ( - LongformerConfig, - LongformerOnnxConfig, - ) - from .tokenization_longformer import LongformerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_longformer_fast import LongformerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_longformer import ( - LongformerForMaskedLM, - LongformerForMultipleChoice, - LongformerForQuestionAnswering, - LongformerForSequenceClassification, - LongformerForTokenClassification, - LongformerModel, - LongformerPreTrainedModel, - LongformerSelfAttention, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_longformer import ( - TFLongformerForMaskedLM, - TFLongformerForMultipleChoice, - TFLongformerForQuestionAnswering, - TFLongformerForSequenceClassification, - TFLongformerForTokenClassification, - TFLongformerModel, - TFLongformerPreTrainedModel, - TFLongformerSelfAttention, - ) - + from .configuration_longformer import * + from .modeling_longformer import * + from .modeling_tf_longformer import * + from .tokenization_longformer import * + from .tokenization_longformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index fc6093763709..7a4d69847175 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ 
b/src/transformers/models/longformer/configuration_longformer.py @@ -199,3 +199,6 @@ def generate_dummy_inputs( inputs["global_attention_mask"][:, ::2] = 1 return inputs + + +__all__ = ["LongformerConfig", "LongformerOnnxConfig"] diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index 67b5e2b67f0b..93988e7b833c 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -2322,3 +2322,14 @@ def forward( attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) + + +__all__ = [ + "LongformerPreTrainedModel", + "LongformerModel", + "LongformerForMaskedLM", + "LongformerForSequenceClassification", + "LongformerForQuestionAnswering", + "LongformerForTokenClassification", + "LongformerForMultipleChoice", +] diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index b32cde202cea..c7f57fd3a09e 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -2772,3 +2772,15 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFLongformerPreTrainedModel", + "TFLongformerModel", + "TFLongformerForMaskedLM", + "TFLongformerForQuestionAnswering", + "TFLongformerForSequenceClassification", + "TFLongformerForMultipleChoice", + "TFLongformerForTokenClassification", + "TFLongformerMainLayer", +] diff --git a/src/transformers/models/longformer/tokenization_longformer.py b/src/transformers/models/longformer/tokenization_longformer.py index 51728d778081..afecf750135b 100644 --- a/src/transformers/models/longformer/tokenization_longformer.py +++ b/src/transformers/models/longformer/tokenization_longformer.py @@ -397,3 +397,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["LongformerTokenizer"] diff --git a/src/transformers/models/longformer/tokenization_longformer_fast.py b/src/transformers/models/longformer/tokenization_longformer_fast.py index d4b4228b035f..3d3ca97a6f64 100644 --- a/src/transformers/models/longformer/tokenization_longformer_fast.py +++ b/src/transformers/models/longformer/tokenization_longformer_fast.py @@ -268,3 +268,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["LongformerTokenizerFast"] diff --git a/src/transformers/models/longt5/__init__.py b/src/transformers/models/longt5/__init__.py index 97d2bbe8ccd3..ba07f1ec3b51 100644 --- a/src/transformers/models/longt5/__init__.py +++ b/src/transformers/models/longt5/__init__.py @@ -11,72 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - - -_import_structure = { - "configuration_longt5": ["LongT5Config", "LongT5OnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_longt5"] = [ - "LongT5EncoderModel", - "LongT5ForConditionalGeneration", - "LongT5Model", - "LongT5PreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_longt5"] = [ - "FlaxLongT5ForConditionalGeneration", - "FlaxLongT5Model", - "FlaxLongT5PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_longt5 import LongT5Config, LongT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_longt5 import ( - LongT5EncoderModel, - LongT5ForConditionalGeneration, - LongT5Model, - LongT5PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_longt5 import ( - FlaxLongT5ForConditionalGeneration, - FlaxLongT5Model, - FlaxLongT5PreTrainedModel, - ) - - + from .configuration_longt5 import * + from .modeling_flax_longt5 import * + from .modeling_longt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/longt5/configuration_longt5.py b/src/transformers/models/longt5/configuration_longt5.py index 0e541ae2a1b4..5b6c45fe2eaf 100644 --- a/src/transformers/models/longt5/configuration_longt5.py +++ b/src/transformers/models/longt5/configuration_longt5.py @@ -170,3 +170,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["LongT5Config", "LongT5OnnxConfig"] diff --git a/src/transformers/models/longt5/modeling_flax_longt5.py b/src/transformers/models/longt5/modeling_flax_longt5.py index 4ab18a3ca7c8..22d7684dae36 100644 --- a/src/transformers/models/longt5/modeling_flax_longt5.py +++ b/src/transformers/models/longt5/modeling_flax_longt5.py @@ -2444,3 +2444,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = ["FlaxLongT5PreTrainedModel", "FlaxLongT5Model", "FlaxLongT5ForConditionalGeneration"] diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index b2a6ed11ca57..422205c5bc91 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -2231,3 +2231,6 @@ def forward( ) return encoder_outputs + + +__all__ = ["LongT5PreTrainedModel", "LongT5Model", "LongT5ForConditionalGeneration", "LongT5EncoderModel"] diff --git a/src/transformers/models/luke/__init__.py b/src/transformers/models/luke/__init__.py index 5ae6f488116f..db3f4f8cb333 100644 --- 
a/src/transformers/models/luke/__init__.py +++ b/src/transformers/models/luke/__init__.py @@ -11,61 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_luke": ["LukeConfig"], - "tokenization_luke": ["LukeTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_luke"] = [ - "LukeForEntityClassification", - "LukeForEntityPairClassification", - "LukeForEntitySpanClassification", - "LukeForMultipleChoice", - "LukeForQuestionAnswering", - "LukeForSequenceClassification", - "LukeForTokenClassification", - "LukeForMaskedLM", - "LukeModel", - "LukePreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_luke import LukeConfig - from .tokenization_luke import LukeTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_luke import ( - LukeForEntityClassification, - LukeForEntityPairClassification, - LukeForEntitySpanClassification, - LukeForMaskedLM, - LukeForMultipleChoice, - LukeForQuestionAnswering, - LukeForSequenceClassification, - LukeForTokenClassification, - LukeModel, - LukePreTrainedModel, - ) - + from .configuration_luke import * + from .modeling_luke import * + from .tokenization_luke import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/luke/configuration_luke.py b/src/transformers/models/luke/configuration_luke.py index 44e1002cfbdc..3d4640a9fd25 100644 --- a/src/transformers/models/luke/configuration_luke.py +++ b/src/transformers/models/luke/configuration_luke.py @@ -137,3 +137,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.use_entity_aware_attention = use_entity_aware_attention self.classifier_dropout = classifier_dropout + + +__all__ = ["LukeConfig"] diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 803f4396a2b6..b45fae678b92 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ b/src/transformers/models/luke/modeling_luke.py @@ -2226,3 +2226,17 @@ def forward( entity_hidden_states=outputs.entity_hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "LukePreTrainedModel", + "LukeModel", + "LukeForMaskedLM", + "LukeForEntityClassification", + "LukeForEntityPairClassification", + "LukeForEntitySpanClassification", + "LukeForSequenceClassification", + "LukeForTokenClassification", + "LukeForQuestionAnswering", + "LukeForMultipleChoice", +] diff --git a/src/transformers/models/luke/tokenization_luke.py b/src/transformers/models/luke/tokenization_luke.py index 1a570992ffb4..662446bd4e1e 100644 --- a/src/transformers/models/luke/tokenization_luke.py +++ b/src/transformers/models/luke/tokenization_luke.py @@ -1703,3 +1703,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = 
f.write(json.dumps(self.entity_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return vocab_file, merge_file, entity_vocab_file + + +__all__ = ["LukeTokenizer"] diff --git a/src/transformers/models/lxmert/__init__.py b/src/transformers/models/lxmert/__init__.py index 007beb4ecd2d..1f83d7299250 100644 --- a/src/transformers/models/lxmert/__init__.py +++ b/src/transformers/models/lxmert/__init__.py @@ -11,105 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_lxmert": ["LxmertConfig"], - "tokenization_lxmert": ["LxmertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_lxmert"] = [ - "LxmertEncoder", - "LxmertForPreTraining", - "LxmertForQuestionAnswering", - "LxmertModel", - "LxmertPreTrainedModel", - "LxmertVisualFeatureEncoder", - "LxmertXLayer", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_lxmert"] = [ - "TFLxmertForPreTraining", - "TFLxmertMainLayer", - "TFLxmertModel", - "TFLxmertPreTrainedModel", - "TFLxmertVisualFeatureEncoder", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_lxmert import LxmertConfig - from .tokenization_lxmert import LxmertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_lxmert_fast import LxmertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_lxmert import ( - LxmertEncoder, - LxmertForPreTraining, - LxmertForQuestionAnswering, - LxmertModel, - LxmertPreTrainedModel, - LxmertVisualFeatureEncoder, - LxmertXLayer, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_lxmert import ( - TFLxmertForPreTraining, - TFLxmertMainLayer, - TFLxmertModel, - TFLxmertPreTrainedModel, - TFLxmertVisualFeatureEncoder, - ) - + from .configuration_lxmert import * + from .modeling_lxmert import * + from .modeling_tf_lxmert import * + from .tokenization_lxmert import * + from .tokenization_lxmert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/lxmert/configuration_lxmert.py b/src/transformers/models/lxmert/configuration_lxmert.py index d753e752272b..c092d01148a6 100644 --- a/src/transformers/models/lxmert/configuration_lxmert.py +++ 
b/src/transformers/models/lxmert/configuration_lxmert.py @@ -164,3 +164,6 @@ def __init__( self.visual_feat_loss = visual_feat_loss self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} super().__init__(**kwargs) + + +__all__ = ["LxmertConfig"] diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index 9113fc4fd0eb..9776838683d3 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -1448,3 +1448,13 @@ def forward( vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) + + +__all__ = [ + "LxmertPreTrainedModel", + "LxmertModel", + "LxmertForPreTraining", + "LxmertForQuestionAnswering", + "LxmertEncoder", + "LxmertVisualFeatureEncoder", +] diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 8a833fb35adc..9f02a6b67a43 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -1650,3 +1650,12 @@ def build(self, input_shape=None): if getattr(self, "answer_head", None) is not None: with tf.name_scope(self.answer_head.name): self.answer_head.build(None) + + +__all__ = [ + "TFLxmertPreTrainedModel", + "TFLxmertModel", + "TFLxmertForPreTraining", + "TFLxmertMainLayer", + "TFLxmertVisualFeatureEncoder", +] diff --git a/src/transformers/models/lxmert/tokenization_lxmert.py b/src/transformers/models/lxmert/tokenization_lxmert.py index 5800f6b0d4a3..94a01ce91137 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert.py +++ b/src/transformers/models/lxmert/tokenization_lxmert.py @@ -501,3 +501,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["LxmertTokenizer"] diff --git a/src/transformers/models/lxmert/tokenization_lxmert_fast.py b/src/transformers/models/lxmert/tokenization_lxmert_fast.py index e31fdbcf761d..9a6a11bba217 100644 --- a/src/transformers/models/lxmert/tokenization_lxmert_fast.py +++ b/src/transformers/models/lxmert/tokenization_lxmert_fast.py @@ -167,3 +167,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["LxmertTokenizerFast"] diff --git a/src/transformers/models/m2m_100/__init__.py b/src/transformers/models/m2m_100/__init__.py index 45232f1390a5..adf66d185cdc 100644 --- a/src/transformers/models/m2m_100/__init__.py +++ b/src/transformers/models/m2m_100/__init__.py @@ -13,46 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_m2m_100": ["M2M100Config", "M2M100OnnxConfig"], - "tokenization_m2m_100": ["M2M100Tokenizer"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_m2m_100"] = [ - "M2M100ForConditionalGeneration", - "M2M100Model", - "M2M100PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_m2m_100 import M2M100Config, M2M100OnnxConfig - from .tokenization_m2m_100 import M2M100Tokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_m2m_100 import ( - M2M100ForConditionalGeneration, - M2M100Model, - M2M100PreTrainedModel, - ) - - + from .configuration_m2m_100 import * + from .modeling_m2m_100 import * + from .tokenization_m2m_100 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/m2m_100/configuration_m2m_100.py b/src/transformers/models/m2m_100/configuration_m2m_100.py index 7ae3c44127e0..ffff894c9249 100644 --- a/src/transformers/models/m2m_100/configuration_m2m_100.py +++ b/src/transformers/models/m2m_100/configuration_m2m_100.py @@ -17,10 +17,10 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -278,3 +278,6 @@ def _generate_dummy_inputs_for_default_and_seq2seq_lm( return common_inputs generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm + + +__all__ = ["M2M100Config", "M2M100OnnxConfig"] diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 23a855fff256..f0884b1238da 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -1491,3 +1491,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["M2M100PreTrainedModel", "M2M100Model", "M2M100ForConditionalGeneration"] diff --git a/src/transformers/models/m2m_100/tokenization_m2m_100.py b/src/transformers/models/m2m_100/tokenization_m2m_100.py index 403d8cc50778..be6ded953a25 100644 --- a/src/transformers/models/m2m_100/tokenization_m2m_100.py +++ b/src/transformers/models/m2m_100/tokenization_m2m_100.py @@ -23,6 +23,7 @@ from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -44,6 +45,7 @@ # fmt: on +@export(backends=("sentencepiece",)) class M2M100Tokenizer(PreTrainedTokenizer): """ Construct an M2M100 tokenizer. 
Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -377,3 +379,6 @@ def load_json(path: str) -> Union[Dict, List]: def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2) + + +__all__ = ["M2M100Tokenizer"] diff --git a/src/transformers/models/mamba/__init__.py b/src/transformers/models/mamba/__init__.py index 80cb8e1c68a2..a8cd57ed3f2b 100644 --- a/src/transformers/models/mamba/__init__.py +++ b/src/transformers/models/mamba/__init__.py @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mamba": ["MambaConfig", "MambaOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mamba"] = [ - "MambaForCausalLM", - "MambaModel", - "MambaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mamba import MambaConfig, MambaOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mamba import ( - MambaForCausalLM, - MambaModel, - MambaPreTrainedModel, - ) + from .configuration_mamba import * + from .modeling_mamba import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mamba/configuration_mamba.py b/src/transformers/models/mamba/configuration_mamba.py index 89f08dd3cd32..6bf80e5fe92d 100644 --- a/src/transformers/models/mamba/configuration_mamba.py +++ b/src/transformers/models/mamba/configuration_mamba.py @@ -155,3 +155,6 @@ def __init__( self.use_mambapy = use_mambapy super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs) + + +__all__ = ["MambaConfig"] diff --git a/src/transformers/models/mamba/modeling_mamba.py b/src/transformers/models/mamba/modeling_mamba.py index 14a3dea1d1cc..1ae70ea987c5 100644 --- a/src/transformers/models/mamba/modeling_mamba.py +++ b/src/transformers/models/mamba/modeling_mamba.py @@ -806,3 +806,6 @@ def forward( cache_params=mamba_outputs.cache_params, hidden_states=mamba_outputs.hidden_states, ) + + +__all__ = ["MambaPreTrainedModel", "MambaModel", "MambaForCausalLM"] diff --git a/src/transformers/models/marian/__init__.py b/src/transformers/models/marian/__init__.py index e3a8c473aeee..bc1da5ef685f 100644 --- a/src/transformers/models/marian/__init__.py +++ b/src/transformers/models/marian/__init__.py @@ -13,99 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_marian": ["MarianConfig", "MarianOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_marian"] = ["MarianTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_marian"] = [ - "MarianForCausalLM", - "MarianModel", - "MarianMTModel", - "MarianPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_marian"] = ["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_marian"] = ["FlaxMarianModel", "FlaxMarianMTModel", "FlaxMarianPreTrainedModel"] - if TYPE_CHECKING: - from .configuration_marian import MarianConfig, MarianOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_marian import MarianTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_marian import ( - MarianForCausalLM, - MarianModel, - MarianMTModel, - MarianPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel - + from .configuration_marian import * + from .modeling_flax_marian import * + from .modeling_marian import * + from .modeling_tf_marian import * + from .tokenization_marian import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/marian/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py index 5a3f083804d5..f4e151b1d1e9 100644 --- a/src/transformers/models/marian/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -17,10 +17,10 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -389,3 +389,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MarianConfig", "MarianOnnxConfig"] diff --git a/src/transformers/models/marian/modeling_flax_marian.py b/src/transformers/models/marian/modeling_flax_marian.py index e33df2e06b21..7fb3e90d31e6 100644 --- a/src/transformers/models/marian/modeling_flax_marian.py +++ b/src/transformers/models/marian/modeling_flax_marian.py @@ -1495,3 +1495,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING, ) append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + +__all__ = ["FlaxMarianPreTrainedModel", "FlaxMarianModel", "FlaxMarianMTModel"] diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 2045f673540f..9bd79c68b0a2 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -1716,3 +1716,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["MarianPreTrainedModel", "MarianModel", "MarianMTModel", "MarianForCausalLM"] diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 30c6157d5008..3c25a6cd24ef 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -1554,3 +1554,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFMarianPreTrainedModel", "TFMarianModel", "TFMarianMTModel", "TFMarianMainLayer"] diff --git a/src/transformers/models/marian/tokenization_marian.py b/src/transformers/models/marian/tokenization_marian.py index 4f0d90b6f0df..496d3c499128 100644 --- a/src/transformers/models/marian/tokenization_marian.py +++ b/src/transformers/models/marian/tokenization_marian.py @@ -23,6 +23,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -41,6 +42,7 @@ # Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json +@export(backends=("sentencepiece",)) class MarianTokenizer(PreTrainedTokenizer): r""" Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -389,3 +391,6 @@ def save_json(data, path: str) -> None: def load_json(path: str) -> Union[Dict, List]: with open(path, "r") as f: return json.load(f) + + +__all__ = ["MarianTokenizer"] diff --git a/src/transformers/models/markuplm/__init__.py b/src/transformers/models/markuplm/__init__.py index 368834f13e98..6afd3336f954 100644 --- a/src/transformers/models/markuplm/__init__.py +++ b/src/transformers/models/markuplm/__init__.py @@ -13,69 +13,19 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_markuplm": ["MarkupLMConfig"], - "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"], - "processing_markuplm": ["MarkupLMProcessor"], - "tokenization_markuplm": ["MarkupLMTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_markuplm"] = [ - "MarkupLMForQuestionAnswering", - "MarkupLMForSequenceClassification", - "MarkupLMForTokenClassification", - "MarkupLMModel", - "MarkupLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_markuplm import MarkupLMConfig - from .feature_extraction_markuplm import MarkupLMFeatureExtractor - from .processing_markuplm import MarkupLMProcessor - from .tokenization_markuplm import MarkupLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_markuplm_fast import MarkupLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_markuplm import ( - MarkupLMForQuestionAnswering, - MarkupLMForSequenceClassification, - MarkupLMForTokenClassification, - MarkupLMModel, - MarkupLMPreTrainedModel, - ) - - + from .configuration_markuplm import * + from .feature_extraction_markuplm import * + from .modeling_markuplm import * + from .processing_markuplm import * + from .tokenization_markuplm import * + from .tokenization_markuplm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py index e348a5c5a1b4..f8bee878e83b 100644 --- a/src/transformers/models/markuplm/configuration_markuplm.py +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -151,3 +151,6 @@ def __init__( self.tag_pad_id = tag_pad_id self.subs_pad_id = subs_pad_id self.xpath_unit_hidden_size = xpath_unit_hidden_size + + +__all__ = ["MarkupLMConfig"] diff --git a/src/transformers/models/markuplm/feature_extraction_markuplm.py b/src/transformers/models/markuplm/feature_extraction_markuplm.py index e3effdc910a8..9b1bd29060bf 100644 --- a/src/transformers/models/markuplm/feature_extraction_markuplm.py +++ b/src/transformers/models/markuplm/feature_extraction_markuplm.py @@ -181,3 +181,6 @@ def __call__(self, html_strings) -> BatchFeature: encoded_inputs = BatchFeature(data=data, tensor_type=None) return encoded_inputs + + +__all__ = ["MarkupLMFeatureExtractor"] diff --git a/src/transformers/models/markuplm/modeling_markuplm.py b/src/transformers/models/markuplm/modeling_markuplm.py index a3aa69621ce1..1edc08eae9cd 100755 --- a/src/transformers/models/markuplm/modeling_markuplm.py +++ 
b/src/transformers/models/markuplm/modeling_markuplm.py @@ -1321,3 +1321,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MarkupLMPreTrainedModel", + "MarkupLMModel", + "MarkupLMForQuestionAnswering", + "MarkupLMForTokenClassification", + "MarkupLMForSequenceClassification", +] diff --git a/src/transformers/models/markuplm/processing_markuplm.py b/src/transformers/models/markuplm/processing_markuplm.py index 757c146c5898..8e822af6b2da 100644 --- a/src/transformers/models/markuplm/processing_markuplm.py +++ b/src/transformers/models/markuplm/processing_markuplm.py @@ -145,3 +145,6 @@ def decode(self, *args, **kwargs): def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return tokenizer_input_names + + +__all__ = ["MarkupLMProcessor"] diff --git a/src/transformers/models/markuplm/tokenization_markuplm.py b/src/transformers/models/markuplm/tokenization_markuplm.py index c77865abc934..82513850ff91 100644 --- a/src/transformers/models/markuplm/tokenization_markuplm.py +++ b/src/transformers/models/markuplm/tokenization_markuplm.py @@ -1443,3 +1443,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["MarkupLMTokenizer"] diff --git a/src/transformers/models/markuplm/tokenization_markuplm_fast.py b/src/transformers/models/markuplm/tokenization_markuplm_fast.py index ff0e4ffeb56e..66b43d605db1 100644 --- a/src/transformers/models/markuplm/tokenization_markuplm_fast.py +++ b/src/transformers/models/markuplm/tokenization_markuplm_fast.py @@ -916,3 +916,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MarkupLMTokenizerFast"] diff --git a/src/transformers/models/mask2former/__init__.py b/src/transformers/models/mask2former/__init__.py index 7ede863452bc..b3cbc2f646f6 100644 --- a/src/transformers/models/mask2former/__init__.py +++ b/src/transformers/models/mask2former/__init__.py @@ -13,58 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mask2former": ["Mask2FormerConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mask2former"] = [ - "Mask2FormerForUniversalSegmentation", - "Mask2FormerModel", - "Mask2FormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mask2former import Mask2FormerConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_mask2former import Mask2FormerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mask2former import ( - Mask2FormerForUniversalSegmentation, - Mask2FormerModel, - Mask2FormerPreTrainedModel, - ) - - + from .configuration_mask2former import * + from .image_processing_mask2former import * + from .modeling_mask2former import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index 5126b3f73cde..af247a19a8de 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -251,3 +251,6 @@ def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): backbone_config=backbone_config, **kwargs, ) + + +__all__ = ["Mask2FormerConfig"] diff --git a/src/transformers/models/mask2former/image_processing_mask2former.py b/src/transformers/models/mask2former/image_processing_mask2former.py index 695ae654ccba..a33c34a72dcc 100644 --- a/src/transformers/models/mask2former/image_processing_mask2former.py +++ b/src/transformers/models/mask2former/image_processing_mask2former.py @@ -50,6 +50,7 @@ logging, ) from ...utils.deprecation import deprecate_kwarg +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -349,6 +350,7 @@ def get_mask2former_resize_output_image_size( return output_size +@export(backends=("vision",)) class Mask2FormerImageProcessor(BaseImageProcessor): r""" Constructs a Mask2Former image processor. 
The image processor can be used to prepare image(s) and optional targets @@ -1235,3 +1237,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["Mask2FormerImageProcessor"] diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index c5788951fd59..eb7d65cede65 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -2555,3 +2555,6 @@ def forward( if loss is not None: output = (loss) + output return output + + +__all__ = ["Mask2FormerPreTrainedModel", "Mask2FormerModel", "Mask2FormerForUniversalSegmentation"] diff --git a/src/transformers/models/maskformer/__init__.py b/src/transformers/models/maskformer/__init__.py index 78aa54a46561..9d58c71f8c3f 100644 --- a/src/transformers/models/maskformer/__init__.py +++ b/src/transformers/models/maskformer/__init__.py @@ -13,72 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_maskformer": ["MaskFormerConfig"], - "configuration_maskformer_swin": ["MaskFormerSwinConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"] - _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_maskformer"] = [ - "MaskFormerForInstanceSegmentation", - "MaskFormerModel", - "MaskFormerPreTrainedModel", - ] - _import_structure["modeling_maskformer_swin"] = [ - "MaskFormerSwinBackbone", - "MaskFormerSwinModel", - "MaskFormerSwinPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_maskformer import MaskFormerConfig - from .configuration_maskformer_swin import MaskFormerSwinConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_maskformer import MaskFormerFeatureExtractor - from .image_processing_maskformer import MaskFormerImageProcessor - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_maskformer import ( - MaskFormerForInstanceSegmentation, - MaskFormerModel, - MaskFormerPreTrainedModel, - ) - from .modeling_maskformer_swin import ( - MaskFormerSwinBackbone, - MaskFormerSwinModel, - MaskFormerSwinPreTrainedModel, - ) - - + from .configuration_maskformer import * + from .configuration_maskformer_swin import * + from .feature_extraction_maskformer import * + from .image_processing_maskformer import * + from .modeling_maskformer import * + from .modeling_maskformer_swin import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/maskformer/configuration_maskformer.py b/src/transformers/models/maskformer/configuration_maskformer.py index d28ef6ca76d2..b5c42f9425d7 100644 --- a/src/transformers/models/maskformer/configuration_maskformer.py +++ b/src/transformers/models/maskformer/configuration_maskformer.py @@ -221,3 +221,6 @@ def from_backbone_and_decoder_configs( decoder_config=decoder_config, **kwargs, ) + + +__all__ = ["MaskFormerConfig"] diff --git a/src/transformers/models/maskformer/configuration_maskformer_swin.py b/src/transformers/models/maskformer/configuration_maskformer_swin.py index 1cc2feffbff3..f3010e71d017 100644 --- a/src/transformers/models/maskformer/configuration_maskformer_swin.py +++ b/src/transformers/models/maskformer/configuration_maskformer_swin.py @@ -148,3 +148,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["MaskFormerSwinConfig"] diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py index 848c8e128296..964c64482ddc 100644 --- a/src/transformers/models/maskformer/feature_extraction_maskformer.py +++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_maskformer import MaskFormerImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MaskFormerFeatureExtractor(MaskFormerImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MaskFormerFeatureExtractor"] diff --git a/src/transformers/models/maskformer/image_processing_maskformer.py b/src/transformers/models/maskformer/image_processing_maskformer.py index aeec21488415..f91e7a1fa104 100644 --- a/src/transformers/models/maskformer/image_processing_maskformer.py +++ b/src/transformers/models/maskformer/image_processing_maskformer.py @@ -51,6 +51,7 @@ logging, ) from ...utils.deprecation import deprecate_kwarg +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -352,6 +353,7 @@ def get_maskformer_resize_output_image_size( return output_size +@export(backends=("vision",)) class MaskFormerImageProcessor(BaseImageProcessor): r""" Constructs a MaskFormer image processor. 
The image processor can be used to prepare image(s) and optional targets @@ -1271,3 +1273,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["MaskFormerImageProcessor"] diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index cd6ef28566a2..bb3f19ed6a12 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -1876,3 +1876,6 @@ def forward( masks_queries_logits=masks_queries_logits, auxiliary_logits=auxiliary_logits, ) + + +__all__ = ["MaskFormerPreTrainedModel", "MaskFormerModel", "MaskFormerForInstanceSegmentation"] diff --git a/src/transformers/models/maskformer/modeling_maskformer_swin.py b/src/transformers/models/maskformer/modeling_maskformer_swin.py index 9a40e0504598..309dd0c83186 100644 --- a/src/transformers/models/maskformer/modeling_maskformer_swin.py +++ b/src/transformers/models/maskformer/modeling_maskformer_swin.py @@ -957,3 +957,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["MaskFormerSwinPreTrainedModel", "MaskFormerSwinModel", "MaskFormerSwinBackbone"] diff --git a/src/transformers/models/mbart/__init__.py b/src/transformers/models/mbart/__init__.py index 12575fcab740..66fca6018e28 100644 --- a/src/transformers/models/mbart/__init__.py +++ b/src/transformers/models/mbart/__init__.py @@ -13,134 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_mbart": ["MBartConfig", "MBartOnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart"] = ["MBartTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mbart"] = [ - "MBartForCausalLM", - "MBartForConditionalGeneration", - "MBartForQuestionAnswering", - "MBartForSequenceClassification", - "MBartModel", - "MBartPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mbart"] = [ - "TFMBartForConditionalGeneration", - "TFMBartModel", - "TFMBartPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mbart"] = [ - "FlaxMBartForConditionalGeneration", - "FlaxMBartForQuestionAnswering", - "FlaxMBartForSequenceClassification", - "FlaxMBartModel", - "FlaxMBartPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mbart import MBartConfig, MBartOnnxConfig - - try: - if not is_sentencepiece_available(): - raise 
OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart import MBartTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart_fast import MBartTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mbart import ( - MBartForCausalLM, - MBartForConditionalGeneration, - MBartForQuestionAnswering, - MBartForSequenceClassification, - MBartModel, - MBartPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mbart import ( - FlaxMBartForConditionalGeneration, - FlaxMBartForQuestionAnswering, - FlaxMBartForSequenceClassification, - FlaxMBartModel, - FlaxMBartPreTrainedModel, - ) - + from .configuration_mbart import * + from .modeling_flax_mbart import * + from .modeling_mbart import * + from .modeling_tf_mbart import * + from .tokenization_mbart import * + from .tokenization_mbart_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mbart/configuration_mbart.py b/src/transformers/models/mbart/configuration_mbart.py index 8a4fe14b6c83..815ebf34c622 100644 --- a/src/transformers/models/mbart/configuration_mbart.py +++ b/src/transformers/models/mbart/configuration_mbart.py @@ -17,10 +17,10 @@ from collections import OrderedDict from typing import Any, Mapping, Optional -from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension +from ...tokenization_utils import PreTrainedTokenizer from ...utils import TensorType, is_torch_available, logging @@ -385,3 +385,6 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + +__all__ = ["MBartConfig", "MBartOnnxConfig"] diff --git a/src/transformers/models/mbart/modeling_flax_mbart.py b/src/transformers/models/mbart/modeling_flax_mbart.py index 83e4dcaee279..00e00e228708 100644 --- a/src/transformers/models/mbart/modeling_flax_mbart.py +++ b/src/transformers/models/mbart/modeling_flax_mbart.py @@ -1769,3 +1769,11 @@ class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel): FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxMBartPreTrainedModel", + "FlaxMBartModel", + "FlaxMBartForConditionalGeneration", + "FlaxMBartForSequenceClassification", + "FlaxMBartForQuestionAnswering", +] diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 6cad7b08f994..6260c460a13e 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -2042,3 +2042,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "MBartPreTrainedModel", + "MBartModel", + "MBartForConditionalGeneration", + "MBartForSequenceClassification", + "MBartForQuestionAnswering", + "MBartForCausalLM", +] diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 8c9bb9812071..a5d44f5ef0ff 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -1570,3 +1570,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFMBartPreTrainedModel", "TFMBartModel", "TFMBartForConditionalGeneration", "TFMBartMainLayer"] diff --git a/src/transformers/models/mbart/tokenization_mbart.py b/src/transformers/models/mbart/tokenization_mbart.py index d9da6cb45cb3..f7f28fab64d4 100644 --- a/src/transformers/models/mbart/tokenization_mbart.py +++ b/src/transformers/models/mbart/tokenization_mbart.py @@ -21,6 +21,7 @@ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -33,6 +34,7 @@ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip +@export(backends=("sentencepiece",)) class MBartTokenizer(PreTrainedTokenizer): """ Construct an MBART tokenizer. 
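A note on the mechanism this hunk relies on: the `@export(backends=("sentencepiece",))` decorator above, together with the `__all__` lists added throughout this patch, is what feeds the new `define_import_structure` helper. Each `__init__.py` now passes its own file path to `define_import_structure`, and the resulting mapping is handed to `_LazyModule` in place of the hand-written `_import_structure` dicts. The actual implementation lives in `src/transformers/utils/import_utils.py` and is not shown in this part of the patch; what follows is only a minimal sketch of that contract, assuming `__all__` is the source of truth for exported names and that `@export` records its backends on the decorated object (the attribute name `__export_backends__` and the scanning logic are assumptions, not the real code).

# Hedged sketch, NOT the implementation from utils/import_utils.py (which this
# part of the patch does not show): how an `export` decorator and a
# `define_import_structure` helper could cooperate so that `_LazyModule`
# imports a submodule only when one of its exported names is first accessed.

import ast
from pathlib import Path


def export(*, backends=()):
    """Tag a class or function with the optional backends it requires."""
    def decorator(obj):
        obj.__export_backends__ = tuple(backends)  # assumed attribute name
        return obj
    return decorator


def define_import_structure(init_file):
    """Map each sibling module of `init_file` to the names its `__all__` declares.

    A real implementation would also record the backend requirements gathered
    from `@export(...)` decorators; this sketch only reads `__all__` lists
    statically, without importing (and thus without triggering) any backend.
    """
    structure = {}
    for py_file in Path(init_file).parent.glob("*.py"):
        if py_file.name == "__init__.py":
            continue
        tree = ast.parse(py_file.read_text(encoding="utf-8"))
        for node in tree.body:
            # Look for a top-level `__all__ = [...]` assignment.
            if (
                isinstance(node, ast.Assign)
                and any(getattr(t, "id", None) == "__all__" for t in node.targets)
                and isinstance(node.value, ast.List)
            ):
                structure[py_file.stem] = [
                    c.value for c in node.value.elts if isinstance(c, ast.Constant)
                ]
    return structure

With a structure like `{"tokenization_mbart": ["MBartTokenizer"], ...}`, `_LazyModule` can defer importing `tokenization_mbart` (and hence its sentencepiece dependency) until `MBartTokenizer` is first accessed, which is why the per-backend try/except `OptionalDependencyNotAvailable` blocks can be deleted wholesale from every `__init__.py` in this patch.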
@@ -335,3 +337,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None: self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] + + +__all__ = ["MBartTokenizer"] diff --git a/src/transformers/models/mbart/tokenization_mbart_fast.py b/src/transformers/models/mbart/tokenization_mbart_fast.py index 71107bf0cdaf..86aa9181ece0 100644 --- a/src/transformers/models/mbart/tokenization_mbart_fast.py +++ b/src/transformers/models/mbart/tokenization_mbart_fast.py @@ -268,3 +268,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["MBartTokenizerFast"] diff --git a/src/transformers/models/mbart50/__init__.py b/src/transformers/models/mbart50/__init__.py index b889e374bb6d..93769c97c29b 100644 --- a/src/transformers/models/mbart50/__init__.py +++ b/src/transformers/models/mbart50/__init__.py @@ -13,46 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart50 import MBart50Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mbart50_fast import MBart50TokenizerFast - + from .tokenization_mbart50 import * + from .tokenization_mbart50_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mbart50/tokenization_mbart50.py b/src/transformers/models/mbart50/tokenization_mbart50.py index 7acc6ecbf36b..93888c53870d 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50.py +++ b/src/transformers/models/mbart50/tokenization_mbart50.py @@ -21,6 +21,7 @@ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -33,6 +34,7 @@ FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] # fmt: skip 
+@export(backends=("sentencepiece",)) class MBart50Tokenizer(PreTrainedTokenizer): """ Construct a MBart50 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -352,3 +354,6 @@ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: self.cur_lang_code_id = self.lang_code_to_id[tgt_lang] self.prefix_tokens = [self.cur_lang_code_id] self.suffix_tokens = [self.eos_token_id] + + +__all__ = ["MBart50Tokenizer"] diff --git a/src/transformers/models/mbart50/tokenization_mbart50_fast.py b/src/transformers/models/mbart50/tokenization_mbart50_fast.py index cc4678f5f53c..45f9ff9e8160 100644 --- a/src/transformers/models/mbart50/tokenization_mbart50_fast.py +++ b/src/transformers/models/mbart50/tokenization_mbart50_fast.py @@ -257,3 +257,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["MBart50TokenizerFast"] diff --git a/src/transformers/models/megatron_bert/__init__.py b/src/transformers/models/megatron_bert/__init__.py index 259e56c25b59..3b28a7831a4b 100644 --- a/src/transformers/models/megatron_bert/__init__.py +++ b/src/transformers/models/megatron_bert/__init__.py @@ -13,55 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_megatron_bert": ["MegatronBertConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_megatron_bert"] = [ - "MegatronBertForCausalLM", - "MegatronBertForMaskedLM", - "MegatronBertForMultipleChoice", - "MegatronBertForNextSentencePrediction", - "MegatronBertForPreTraining", - "MegatronBertForQuestionAnswering", - "MegatronBertForSequenceClassification", - "MegatronBertForTokenClassification", - "MegatronBertModel", - "MegatronBertPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_megatron_bert import MegatronBertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_megatron_bert import ( - MegatronBertForCausalLM, - MegatronBertForMaskedLM, - MegatronBertForMultipleChoice, - MegatronBertForNextSentencePrediction, - MegatronBertForPreTraining, - MegatronBertForQuestionAnswering, - MegatronBertForSequenceClassification, - MegatronBertForTokenClassification, - MegatronBertModel, - MegatronBertPreTrainedModel, - ) - + from .configuration_megatron_bert import * + from .modeling_megatron_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/megatron_bert/configuration_megatron_bert.py b/src/transformers/models/megatron_bert/configuration_megatron_bert.py index a0e216a5352d..db81a10a475e 100644 --- a/src/transformers/models/megatron_bert/configuration_megatron_bert.py +++ b/src/transformers/models/megatron_bert/configuration_megatron_bert.py @@ -124,3 +124,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache + + 
+__all__ = ["MegatronBertConfig"] diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index 16641655e203..60dad08cdcc6 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -1836,3 +1836,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MegatronBertPreTrainedModel", + "MegatronBertModel", + "MegatronBertForPreTraining", + "MegatronBertForCausalLM", + "MegatronBertForMaskedLM", + "MegatronBertForNextSentencePrediction", + "MegatronBertForSequenceClassification", + "MegatronBertForMultipleChoice", + "MegatronBertForTokenClassification", + "MegatronBertForQuestionAnswering", +] diff --git a/src/transformers/models/mgp_str/__init__.py b/src/transformers/models/mgp_str/__init__.py index 901425ca45d6..1ddddf803ca8 100644 --- a/src/transformers/models/mgp_str/__init__.py +++ b/src/transformers/models/mgp_str/__init__.py @@ -1,60 +1,19 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mgp_str": ["MgpstrConfig"], - "processing_mgp_str": ["MgpstrProcessor"], - "tokenization_mgp_str": ["MgpstrTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mgp_str"] = [ - "MgpstrModel", - "MgpstrPreTrainedModel", - "MgpstrForSceneTextRecognition", - ] if TYPE_CHECKING: - from .configuration_mgp_str import MgpstrConfig - from .processing_mgp_str import MgpstrProcessor - from .tokenization_mgp_str import MgpstrTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mgp_str import ( - MgpstrForSceneTextRecognition, - MgpstrModel, - MgpstrPreTrainedModel, - ) + from .configuration_mgp_str import * + from .modeling_mgp_str import * + from .processing_mgp_str import * + from .tokenization_mgp_str import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mgp_str/configuration_mgp_str.py b/src/transformers/models/mgp_str/configuration_mgp_str.py index d7850342dc71..c98dd783751b 100644 --- a/src/transformers/models/mgp_str/configuration_mgp_str.py +++ b/src/transformers/models/mgp_str/configuration_mgp_str.py @@ -132,3 +132,6 @@ def __init__( self.drop_path_rate = drop_path_rate self.output_a3_attentions = output_a3_attentions self.initializer_range = initializer_range + + +__all__ = ["MgpstrConfig"] diff --git a/src/transformers/models/mgp_str/modeling_mgp_str.py b/src/transformers/models/mgp_str/modeling_mgp_str.py index 6b18c45e01d9..d793a8f0f4ae 100644 --- a/src/transformers/models/mgp_str/modeling_mgp_str.py +++ b/src/transformers/models/mgp_str/modeling_mgp_str.py @@ -508,3 +508,6 @@ def forward( attentions=mgp_outputs.attentions, a3_attentions=all_a3_attentions, ) + + +__all__ = ["MgpstrPreTrainedModel", "MgpstrModel", "MgpstrForSceneTextRecognition"] diff --git a/src/transformers/models/mgp_str/processing_mgp_str.py b/src/transformers/models/mgp_str/processing_mgp_str.py index 207d4230ba09..8fd05fd919b2 100644 --- a/src/transformers/models/mgp_str/processing_mgp_str.py +++ b/src/transformers/models/mgp_str/processing_mgp_str.py @@ -16,11 +16,10 @@ import warnings -from transformers import AutoTokenizer -from transformers.utils import is_torch_available -from transformers.utils.generic import ExplicitEnum - +from ...models.auto.tokenization_auto import AutoTokenizer from ...processing_utils import ProcessorMixin +from ...utils import is_torch_available +from ...utils.generic import ExplicitEnum if is_torch_available(): @@ -228,3 +227,6 @@ def wp_decode(self, sequences): """ decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)] return decode_strs + + +__all__ = ["MgpstrProcessor"] diff --git a/src/transformers/models/mgp_str/tokenization_mgp_str.py b/src/transformers/models/mgp_str/tokenization_mgp_str.py index a34ba744c196..1c224d142f7b 100644 --- a/src/transformers/models/mgp_str/tokenization_mgp_str.py +++
b/src/transformers/models/mgp_str/tokenization_mgp_str.py @@ -99,3 +99,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) + + +__all__ = ["MgpstrTokenizer"] diff --git a/src/transformers/models/mistral/__init__.py b/src/transformers/models/mistral/__init__.py index 93e551e19305..18a5657cd2ec 100644 --- a/src/transformers/models/mistral/__init__.py +++ b/src/transformers/models/mistral/__init__.py @@ -13,104 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mistral": ["MistralConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mistral"] = [ - "MistralForCausalLM", - "MistralModel", - "MistralPreTrainedModel", - "MistralForSequenceClassification", - "MistralForTokenClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mistral"] = [ - "FlaxMistralForCausalLM", - "FlaxMistralModel", - "FlaxMistralPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mistral"] = [ - "TFMistralModel", - "TFMistralForCausalLM", - "TFMistralForSequenceClassification", - "TFMistralPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mistral import MistralConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mistral import ( - MistralForCausalLM, - MistralForSequenceClassification, - MistralForTokenClassification, - MistralModel, - MistralPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mistral import ( - FlaxMistralForCausalLM, - FlaxMistralModel, - FlaxMistralPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mistral import ( - TFMistralForCausalLM, - TFMistralForSequenceClassification, - TFMistralModel, - TFMistralPreTrainedModel, - ) - - + from .configuration_mistral import * + from .modeling_flax_mistral import * + from .modeling_mistral import * + from .modeling_tf_mistral import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mistral/configuration_mistral.py b/src/transformers/models/mistral/configuration_mistral.py index c8b63778862b..3d8dc372e991 100644 --- a/src/transformers/models/mistral/configuration_mistral.py +++ b/src/transformers/models/mistral/configuration_mistral.py @@ -149,3 +149,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + 
+__all__ = ["MistralConfig"] diff --git a/src/transformers/models/mistral/modeling_flax_mistral.py b/src/transformers/models/mistral/modeling_flax_mistral.py index 3bff2a628122..1c1b99e6d8bf 100644 --- a/src/transformers/models/mistral/modeling_flax_mistral.py +++ b/src/transformers/models/mistral/modeling_flax_mistral.py @@ -701,7 +701,6 @@ def __call__( """, MISTRAL_START_DOCSTRING, ) - # Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Mistral class FlaxMistralForCausalLM(FlaxMistralPreTrainedModel): module_class = FlaxMistralForCausalLMModule @@ -740,3 +739,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, ) + +__all__ = ["FlaxMistralPreTrainedModel", "FlaxMistralModel", "FlaxMistralForCausalLM"] diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 411dc478a1d5..792beb5652a8 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -1338,3 +1338,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MistralPreTrainedModel", + "MistralModel", + "MistralForCausalLM", + "MistralForSequenceClassification", + "MistralForTokenClassification", +] diff --git a/src/transformers/models/mistral/modeling_tf_mistral.py b/src/transformers/models/mistral/modeling_tf_mistral.py index 5c21dd3c3f53..7c5bafe5d78a 100644 --- a/src/transformers/models/mistral/modeling_tf_mistral.py +++ b/src/transformers/models/mistral/modeling_tf_mistral.py @@ -1052,3 +1052,12 @@ def build(self, input_shape=None): if getattr(self, "score", None) is not None: with tf.name_scope(self.score.name): self.score.build((self.config.hidden_size,)) + + +__all__ = [ + "TFMistralPreTrainedModel", + "TFMistralModel", + "TFMistralMainLayer", + "TFMistralForCausalLM", + "TFMistralForSequenceClassification", +] diff --git a/src/transformers/models/mixtral/__init__.py b/src/transformers/models/mixtral/__init__.py index b124d41dfbec..e4ca36bacbee 100644 --- a/src/transformers/models/mixtral/__init__.py +++ b/src/transformers/models/mixtral/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_mixtral": ["MixtralConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mixtral"] = [ - "MixtralForCausalLM", - "MixtralModel", - "MixtralPreTrainedModel", - "MixtralForSequenceClassification", - "MixtralForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mixtral import MixtralConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mixtral import ( - MixtralForCausalLM, - MixtralForSequenceClassification, - MixtralForTokenClassification, - MixtralModel, - MixtralPreTrainedModel, - ) - - + from .configuration_mixtral import * + from .modeling_mixtral import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mixtral/configuration_mixtral.py b/src/transformers/models/mixtral/configuration_mixtral.py index 164988b4dc52..7f2a7071a89f 100644 --- a/src/transformers/models/mixtral/configuration_mixtral.py +++ b/src/transformers/models/mixtral/configuration_mixtral.py @@ -167,3 +167,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["MixtralConfig"] diff --git a/src/transformers/models/mixtral/modeling_mixtral.py b/src/transformers/models/mixtral/modeling_mixtral.py index 22aa9010692a..f9ee6f019924 100644 --- a/src/transformers/models/mixtral/modeling_mixtral.py +++ b/src/transformers/models/mixtral/modeling_mixtral.py @@ -848,6 +848,7 @@ def forward( "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) + # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2PreTrainedModel with Qwen2->Mixtral class MixtralPreTrainedModel(PreTrainedModel): config_class = MixtralConfig @@ -946,6 +947,7 @@ def _init_weights(self, module): "The bare Mixtral Model outputting raw hidden-states without any specific head on top.", MIXTRAL_START_DOCSTRING, ) + # copied from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->MIXTRAL,Mistral->Mixtral # TODO @longjie no longer copied from Mistral after static cache class MixtralModel(MixtralPreTrainedModel): @@ -1596,3 +1598,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MixtralPreTrainedModel", + "MixtralModel", + "MixtralForCausalLM", + "MixtralForSequenceClassification", + "MixtralForTokenClassification", +] diff --git a/src/transformers/models/mluke/__init__.py b/src/transformers/models/mluke/__init__.py index aae869bdff51..2a06b46ed3dd 100644 --- a/src/transformers/models/mluke/__init__.py +++ b/src/transformers/models/mluke/__init__.py @@ -11,34 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {} - - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mluke"] = ["MLukeTokenizer"] if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mluke import MLukeTokenizer - - + from .tokenization_mluke import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mluke/tokenization_mluke.py b/src/transformers/models/mluke/tokenization_mluke.py index 3ac8191402af..1db163ebc16d 100644 --- a/src/transformers/models/mluke/tokenization_mluke.py +++ b/src/transformers/models/mluke/tokenization_mluke.py @@ -38,6 +38,7 @@ to_py_obj, ) from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -128,6 +129,7 @@ """ +@export(backends=("sentencepiece",)) class MLukeTokenizer(PreTrainedTokenizer): """ Adapted from [`XLMRobertaTokenizer`] and [`LukeTokenizer`]. Based on @@ -1611,3 +1613,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["MLukeTokenizer"] diff --git a/src/transformers/models/mobilebert/__init__.py b/src/transformers/models/mobilebert/__init__.py index c085c3d8636c..7121f6c955fd 100644 --- a/src/transformers/models/mobilebert/__init__.py +++ b/src/transformers/models/mobilebert/__init__.py @@ -11,129 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mobilebert": [ - "MobileBertConfig", - "MobileBertOnnxConfig", - ], - "tokenization_mobilebert": ["MobileBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilebert"] = [ - "MobileBertForMaskedLM", - "MobileBertForMultipleChoice", - "MobileBertForNextSentencePrediction", - "MobileBertForPreTraining", - "MobileBertForQuestionAnswering", - "MobileBertForSequenceClassification", - "MobileBertForTokenClassification", - "MobileBertLayer", - "MobileBertModel", - "MobileBertPreTrainedModel", - "load_tf_weights_in_mobilebert", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mobilebert"] = [ - "TFMobileBertForMaskedLM", - "TFMobileBertForMultipleChoice", - "TFMobileBertForNextSentencePrediction", - "TFMobileBertForPreTraining", - "TFMobileBertForQuestionAnswering", - "TFMobileBertForSequenceClassification", - "TFMobileBertForTokenClassification", - "TFMobileBertMainLayer", - "TFMobileBertModel", - "TFMobileBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilebert import ( - MobileBertConfig, - MobileBertOnnxConfig, - ) - from .tokenization_mobilebert import MobileBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mobilebert_fast import MobileBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilebert import ( - MobileBertForMaskedLM, - MobileBertForMultipleChoice, - MobileBertForNextSentencePrediction, - MobileBertForPreTraining, - MobileBertForQuestionAnswering, - MobileBertForSequenceClassification, - MobileBertForTokenClassification, - MobileBertLayer, - MobileBertModel, - MobileBertPreTrainedModel, - load_tf_weights_in_mobilebert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mobilebert import ( - TFMobileBertForMaskedLM, - TFMobileBertForMultipleChoice, - TFMobileBertForNextSentencePrediction, - TFMobileBertForPreTraining, - TFMobileBertForQuestionAnswering, - TFMobileBertForSequenceClassification, - TFMobileBertForTokenClassification, - TFMobileBertMainLayer, - TFMobileBertModel, - TFMobileBertPreTrainedModel, - ) - + from .configuration_mobilebert import * + from .modeling_mobilebert import * + from .modeling_tf_mobilebert import * + from .tokenization_mobilebert import * + from .tokenization_mobilebert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilebert/configuration_mobilebert.py b/src/transformers/models/mobilebert/configuration_mobilebert.py index 2370fa9b576d..742864573ab2 100644 --- a/src/transformers/models/mobilebert/configuration_mobilebert.py +++ b/src/transformers/models/mobilebert/configuration_mobilebert.py @@ -179,3 +179,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["MobileBertConfig", "MobileBertOnnxConfig"] diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 44007667c6b6..bb663fa5a527 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -1617,3 +1617,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_mobilebert", + "MobileBertPreTrainedModel", + "MobileBertModel", + "MobileBertForPreTraining", + "MobileBertForMaskedLM", + "MobileBertForNextSentencePrediction", + "MobileBertForSequenceClassification", + "MobileBertForQuestionAnswering", + "MobileBertForMultipleChoice", + "MobileBertForTokenClassification", +] diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index d73c276b4f7d..d8895e70e3b4 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -1964,3 +1964,17 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFMobileBertPreTrainedModel", + "TFMobileBertModel", + "TFMobileBertForPreTraining", + "TFMobileBertForMaskedLM", + "TFMobileBertForNextSentencePrediction", + "TFMobileBertForSequenceClassification", + "TFMobileBertForQuestionAnswering", + "TFMobileBertForMultipleChoice", + "TFMobileBertForTokenClassification", + "TFMobileBertMainLayer", +] diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert.py b/src/transformers/models/mobilebert/tokenization_mobilebert.py index 972f57fae0a2..23438dc6aad9 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert.py @@ -503,3 +503,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["MobileBertTokenizer"] diff --git a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py index 21057924092e..ec39eb2b26bd 100644 --- a/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py @@ -172,3 +172,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MobileBertTokenizerFast"] diff --git a/src/transformers/models/mobilenet_v1/__init__.py b/src/transformers/models/mobilenet_v1/__init__.py index 6ff5725a21a8..390f3d5cd7e3 100644 --- a/src/transformers/models/mobilenet_v1/__init__.py +++ b/src/transformers/models/mobilenet_v1/__init__.py @@ -13,69 
+13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_mobilenet_v1": [ - "MobileNetV1Config", - "MobileNetV1OnnxConfig", - ], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilenet_v1"] = ["MobileNetV1FeatureExtractor"] - _import_structure["image_processing_mobilenet_v1"] = ["MobileNetV1ImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilenet_v1"] = [ - "MobileNetV1ForImageClassification", - "MobileNetV1Model", - "MobileNetV1PreTrainedModel", - "load_tf_weights_in_mobilenet_v1", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilenet_v1 import ( - MobileNetV1Config, - MobileNetV1OnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilenet_v1 import MobileNetV1FeatureExtractor - from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilenet_v1 import ( - MobileNetV1ForImageClassification, - MobileNetV1Model, - MobileNetV1PreTrainedModel, - load_tf_weights_in_mobilenet_v1, - ) - - + from .configuration_mobilenet_v1 import * + from .feature_extraction_mobilenet_v1 import * + from .image_processing_mobilenet_v1 import * + from .modeling_mobilenet_v1 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py index 2bf204a66d77..59e977b95212 100644 --- a/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/configuration_mobilenet_v1.py @@ -121,3 +121,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileNetV1Config", "MobileNetV1OnnxConfig"] diff --git a/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py index 34cdb11cd9f3..8f8ec836235d 100644 --- a/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ 
= ["MobileNetV1FeatureExtractor"] diff --git a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py index 7981947307fd..57aa5080d6e5 100644 --- a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py @@ -38,11 +38,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileNetV1ImageProcessor(BaseImageProcessor): r""" Constructs a MobileNetV1 image processor. @@ -300,3 +302,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["MobileNetV1ImageProcessor"] diff --git a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py index 00f8c501b212..50aade28d894 100755 --- a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py @@ -477,3 +477,11 @@ def forward( logits=logits, hidden_states=outputs.hidden_states, ) + + +__all__ = [ + "load_tf_weights_in_mobilenet_v1", + "MobileNetV1PreTrainedModel", + "MobileNetV1Model", + "MobileNetV1ForImageClassification", +] diff --git a/src/transformers/models/mobilenet_v2/__init__.py b/src/transformers/models/mobilenet_v2/__init__.py index 5fcab8fe7c4e..7dd1c3d5b1a2 100644 --- a/src/transformers/models/mobilenet_v2/__init__.py +++ b/src/transformers/models/mobilenet_v2/__init__.py @@ -13,72 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_mobilenet_v2": [ - "MobileNetV2Config", - "MobileNetV2OnnxConfig", - ], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"] - _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilenet_v2"] = [ - "MobileNetV2ForImageClassification", - "MobileNetV2ForSemanticSegmentation", - "MobileNetV2Model", - "MobileNetV2PreTrainedModel", - "load_tf_weights_in_mobilenet_v2", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mobilenet_v2 import ( - MobileNetV2Config, - MobileNetV2OnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor - from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilenet_v2 import ( - MobileNetV2ForImageClassification, - MobileNetV2ForSemanticSegmentation, - MobileNetV2Model, - MobileNetV2PreTrainedModel, - load_tf_weights_in_mobilenet_v2, - ) - - + from .configuration_mobilenet_v2 
import * + from .feature_extraction_mobilenet_v2 import * + from .image_processing_mobilenet_v2 import * + from .modeling_mobilenet_v2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py index 25bcfa578547..6cf64847e2cd 100644 --- a/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/configuration_mobilenet_v2.py @@ -149,3 +149,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileNetV2Config", "MobileNetV2OnnxConfig"] diff --git a/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py index 62581e2c0998..4dfa653116a4 100644 --- a/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/feature_extraction_mobilenet_v2.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileNetV2FeatureExtractor(MobileNetV2ImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MobileNetV2FeatureExtractor"] diff --git a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py index 25d227bd582f..9d64ebce689a 100644 --- a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py @@ -38,6 +38,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging +from ...utils.import_utils import export if is_torch_available(): @@ -47,6 +48,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileNetV2ImageProcessor(BaseImageProcessor): r""" Constructs a MobileNetV2 image processor. 
@@ -347,3 +349,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["MobileNetV2ImageProcessor"] diff --git a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py index 47ec95a79eec..f93255424f97 100755 --- a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py @@ -857,3 +857,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "load_tf_weights_in_mobilenet_v2", + "MobileNetV2PreTrainedModel", + "MobileNetV2Model", + "MobileNetV2ForImageClassification", + "MobileNetV2ForSemanticSegmentation", +] diff --git a/src/transformers/models/mobilevit/__init__.py b/src/transformers/models/mobilevit/__init__.py index 942a963227b9..7f520bab1cc8 100644 --- a/src/transformers/models/mobilevit/__init__.py +++ b/src/transformers/models/mobilevit/__init__.py @@ -13,94 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mobilevit": ["MobileViTConfig", "MobileViTOnnxConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"] - _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilevit"] = [ - "MobileViTForImageClassification", - "MobileViTForSemanticSegmentation", - "MobileViTModel", - "MobileViTPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mobilevit"] = [ - "TFMobileViTForImageClassification", - "TFMobileViTForSemanticSegmentation", - "TFMobileViTModel", - "TFMobileViTPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mobilevit import MobileViTConfig, MobileViTOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_mobilevit import MobileViTFeatureExtractor - from .image_processing_mobilevit import MobileViTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilevit import ( - MobileViTForImageClassification, - MobileViTForSemanticSegmentation, - MobileViTModel, - MobileViTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mobilevit import ( - TFMobileViTForImageClassification, - TFMobileViTForSemanticSegmentation, - TFMobileViTModel, - TFMobileViTPreTrainedModel, - ) - - + from .configuration_mobilevit import * + from .feature_extraction_mobilevit import * + 
from .image_processing_mobilevit import * + from .modeling_mobilevit import * + from .modeling_tf_mobilevit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index 500f8b23db0a..2ef891a96f6c 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -167,3 +167,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileViTConfig", "MobileViTOnnxConfig"] diff --git a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py index a73baed6405c..0a820a6771ff 100644 --- a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py +++ b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_mobilevit import MobileViTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileViTFeatureExtractor(MobileViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["MobileViTFeatureExtractor"] diff --git a/src/transformers/models/mobilevit/image_processing_mobilevit.py b/src/transformers/models/mobilevit/image_processing_mobilevit.py index e6a8692edfd4..7ebf746f126f 100644 --- a/src/transformers/models/mobilevit/image_processing_mobilevit.py +++ b/src/transformers/models/mobilevit/image_processing_mobilevit.py @@ -39,6 +39,7 @@ is_vision_available, logging, ) +from ...utils.import_utils import export if is_vision_available(): @@ -51,6 +52,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class MobileViTImageProcessor(BaseImageProcessor): r""" Constructs a MobileViT image processor. 
@@ -480,3 +482,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["MobileViTImageProcessor"] diff --git a/src/transformers/models/mobilevit/modeling_mobilevit.py b/src/transformers/models/mobilevit/modeling_mobilevit.py index 59c191b37896..09be9d822910 100755 --- a/src/transformers/models/mobilevit/modeling_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_mobilevit.py @@ -1070,3 +1070,11 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "MobileViTPreTrainedModel", + "MobileViTModel", + "MobileViTForImageClassification", + "MobileViTForSemanticSegmentation", +] diff --git a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py index 499a7942e938..d729f99a9012 100644 --- a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py @@ -1368,3 +1368,12 @@ def build(self, input_shape=None): if getattr(self, "segmentation_head", None) is not None: with tf.name_scope(self.segmentation_head.name): self.segmentation_head.build(None) + + +__all__ = [ + "TFMobileViTPreTrainedModel", + "TFMobileViTModel", + "TFMobileViTForImageClassification", + "TFMobileViTForSemanticSegmentation", + "TFMobileViTMainLayer", +] diff --git a/src/transformers/models/mobilevitv2/__init__.py b/src/transformers/models/mobilevitv2/__init__.py index 770736c03df7..aa6100c82228 100644 --- a/src/transformers/models/mobilevitv2/__init__.py +++ b/src/transformers/models/mobilevitv2/__init__.py @@ -13,55 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mobilevitv2": [ - "MobileViTV2Config", - "MobileViTV2OnnxConfig", - ], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mobilevitv2"] = [ - "MobileViTV2ForImageClassification", - "MobileViTV2ForSemanticSegmentation", - "MobileViTV2Model", - "MobileViTV2PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mobilevitv2 import ( - MobileViTV2Config, - MobileViTV2OnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mobilevitv2 import ( - MobileViTV2ForImageClassification, - MobileViTV2ForSemanticSegmentation, - MobileViTV2Model, - MobileViTV2PreTrainedModel, - ) - + from .configuration_mobilevitv2 import * + from .modeling_mobilevitv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py index 65260d6501eb..ff9a5977922f 100644 --- a/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py @@ -163,3 +163,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["MobileViTV2Config", "MobileViTV2OnnxConfig"] diff --git a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py index ae043cf567f1..4efb80163ce0 100644 --- a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py @@ -1025,3 +1025,11 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = [ + "MobileViTV2PreTrainedModel", + "MobileViTV2Model", + "MobileViTV2ForImageClassification", + "MobileViTV2ForSemanticSegmentation", +] diff --git a/src/transformers/models/mpnet/__init__.py b/src/transformers/models/mpnet/__init__.py index 54c20d9f1967..d9163fd17948 100644 --- a/src/transformers/models/mpnet/__init__.py +++ b/src/transformers/models/mpnet/__init__.py @@ -11,116 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_mpnet": ["MPNetConfig"], - "tokenization_mpnet": ["MPNetTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mpnet_fast"] = ["MPNetTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mpnet"] = [ - "MPNetForMaskedLM", - "MPNetForMultipleChoice", - "MPNetForQuestionAnswering", - "MPNetForSequenceClassification", - "MPNetForTokenClassification", - "MPNetLayer", - "MPNetModel", - "MPNetPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mpnet"] = [ - "TFMPNetEmbeddings", - "TFMPNetForMaskedLM", - "TFMPNetForMultipleChoice", - "TFMPNetForQuestionAnswering", - "TFMPNetForSequenceClassification", - "TFMPNetForTokenClassification", - "TFMPNetMainLayer", - "TFMPNetModel", - "TFMPNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mpnet import MPNetConfig - from .tokenization_mpnet import MPNetTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mpnet_fast import MPNetTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mpnet import ( - MPNetForMaskedLM, - MPNetForMultipleChoice, - MPNetForQuestionAnswering, - MPNetForSequenceClassification, - MPNetForTokenClassification, - MPNetLayer, - MPNetModel, - MPNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mpnet import ( - TFMPNetEmbeddings, - TFMPNetForMaskedLM, - TFMPNetForMultipleChoice, - TFMPNetForQuestionAnswering, - TFMPNetForSequenceClassification, - TFMPNetForTokenClassification, - TFMPNetMainLayer, - TFMPNetModel, - TFMPNetPreTrainedModel, - ) - + from .configuration_mpnet import * + from .modeling_mpnet import * + from .modeling_tf_mpnet import * + from .tokenization_mpnet import * + from .tokenization_mpnet_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mpnet/configuration_mpnet.py b/src/transformers/models/mpnet/configuration_mpnet.py index 0abb89c9423e..e80d6a0c3030 100644 --- a/src/transformers/models/mpnet/configuration_mpnet.py +++ b/src/transformers/models/mpnet/configuration_mpnet.py @@ -111,3 +111,6 @@ def __init__( self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.relative_attention_num_buckets = relative_attention_num_buckets + + +__all__ = ["MPNetConfig"] diff --git a/src/transformers/models/mpnet/modeling_mpnet.py 
b/src/transformers/models/mpnet/modeling_mpnet.py index 11a27f5577da..7f5db64a06ee 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -1050,3 +1050,14 @@ def create_position_ids_from_input_ids(input_ids, padding_idx): mask = input_ids.ne(padding_idx).int() incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "MPNetPreTrainedModel", + "MPNetModel", + "MPNetForMaskedLM", + "MPNetForSequenceClassification", + "MPNetForMultipleChoice", + "MPNetForTokenClassification", + "MPNetForQuestionAnswering", +] diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index d1864bd1970e..349c328da9b3 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -1339,3 +1339,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFMPNetPreTrainedModel", + "TFMPNetModel", + "TFMPNetForMaskedLM", + "TFMPNetForSequenceClassification", + "TFMPNetForMultipleChoice", + "TFMPNetForTokenClassification", + "TFMPNetForQuestionAnswering", + "TFMPNetMainLayer", +] diff --git a/src/transformers/models/mpnet/tokenization_mpnet.py b/src/transformers/models/mpnet/tokenization_mpnet.py index 8f152fa34340..c3e744bce9ec 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet.py +++ b/src/transformers/models/mpnet/tokenization_mpnet.py @@ -527,3 +527,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["MPNetTokenizer"] diff --git a/src/transformers/models/mpnet/tokenization_mpnet_fast.py b/src/transformers/models/mpnet/tokenization_mpnet_fast.py index 433c3028fc20..4a0c59d1e4d1 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet_fast.py +++ b/src/transformers/models/mpnet/tokenization_mpnet_fast.py @@ -204,3 +204,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["MPNetTokenizerFast"] diff --git a/src/transformers/models/mpt/__init__.py b/src/transformers/models/mpt/__init__.py index 49b3a0d61fcd..3546e8746cc0 100644 --- a/src/transformers/models/mpt/__init__.py +++ b/src/transformers/models/mpt/__init__.py @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_mpt": ["MptConfig", "MptOnnxConfig"], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mpt"] = [ - "MptForCausalLM", - "MptModel", - "MptPreTrainedModel", - "MptForSequenceClassification", - "MptForTokenClassification", - "MptForQuestionAnswering", - ] if TYPE_CHECKING: - from .configuration_mpt import MptConfig, MptOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mpt import ( - MptForCausalLM, - MptForQuestionAnswering, - MptForSequenceClassification, - MptForTokenClassification, - MptModel, - MptPreTrainedModel, - ) - + from .configuration_mpt import * + from .modeling_mpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mpt/configuration_mpt.py b/src/transformers/models/mpt/configuration_mpt.py index ed822c813ba2..62a0181a045a 100644 --- a/src/transformers/models/mpt/configuration_mpt.py +++ b/src/transformers/models/mpt/configuration_mpt.py @@ -242,3 +242,6 @@ def __init__( self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs) + + +__all__ = ["MptConfig"] diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py index 85579636dcc4..b8fa500a6be6 100644 --- a/src/transformers/models/mpt/modeling_mpt.py +++ b/src/transformers/models/mpt/modeling_mpt.py @@ -941,3 +941,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MptPreTrainedModel", + "MptModel", + "MptForCausalLM", + "MptForSequenceClassification", + "MptForTokenClassification", + "MptForQuestionAnswering", +] diff --git a/src/transformers/models/mra/__init__.py b/src/transformers/models/mra/__init__.py index 21d82eb3daba..7422b7f3e85f 100644 --- a/src/transformers/models/mra/__init__.py +++ b/src/transformers/models/mra/__init__.py @@ -1,66 +1,17 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = {"configuration_mra": ["MraConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mra"] = [ - "MraForMaskedLM", - "MraForMultipleChoice", - "MraForQuestionAnswering", - "MraForSequenceClassification", - "MraForTokenClassification", - "MraLayer", - "MraModel", - "MraPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mra import MraConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mra import ( - MraForMaskedLM, - MraForMultipleChoice, - MraForQuestionAnswering, - MraForSequenceClassification, - MraForTokenClassification, - MraLayer, - MraModel, - MraPreTrainedModel, - ) + from .configuration_mra import * + from .modeling_mra import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mra/configuration_mra.py b/src/transformers/models/mra/configuration_mra.py index 6837de4f8021..16b064c98f7e 100644 --- a/src/transformers/models/mra/configuration_mra.py +++ b/src/transformers/models/mra/configuration_mra.py @@ -132,3 +132,6 @@ def __init__( self.approx_mode = approx_mode self.initial_prior_first_n_blocks = initial_prior_first_n_blocks self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks + + +__all__ = ["MraConfig"] diff --git a/src/transformers/models/mra/modeling_mra.py b/src/transformers/models/mra/modeling_mra.py index 09b21365937f..5c4f90ce387c 100644 --- a/src/transformers/models/mra/modeling_mra.py +++ b/src/transformers/models/mra/modeling_mra.py @@ -1478,3 +1478,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "MraPreTrainedModel", + "MraModel", + "MraForMaskedLM", + "MraForSequenceClassification", + "MraForMultipleChoice", + "MraForTokenClassification", + "MraForQuestionAnswering", +] diff --git a/src/transformers/models/mt5/__init__.py b/src/transformers/models/mt5/__init__.py index e142aa43676e..2885d08af57e 100644 --- a/src/transformers/models/mt5/__init__.py +++ b/src/transformers/models/mt5/__init__.py @@ -11,113 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
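The runtime path is unchanged in spirit: _LazyModule still defers every submodule import until an attribute is first requested, and mra additionally picks up the module_spec=__spec__ argument its old registration lacked. Note also that MraLayer, present in the old _import_structure, is deliberately absent from the new __all__, matching the other *Layer internals this patch stops exporting. A simplified sketch of the lazy mechanism follows, assuming the real class in utils/import_utils.py works along these lines; it is illustrative, not the actual implementation:

import importlib
from types import ModuleType


class LazyModuleSketch(ModuleType):
    """Simplified stand-in for transformers' _LazyModule (illustrative only)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: iterable of names} into {name: submodule}.
        self._origin = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, name):
        if name not in self._origin:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f".{self._origin[name]}", self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups bypass __getattr__
        return value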
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -if is_sentencepiece_available(): - from ..t5.tokenization_t5 import T5Tokenizer -else: - from ...utils.dummy_sentencepiece_objects import T5Tokenizer - -MT5Tokenizer = T5Tokenizer - -if is_tokenizers_available(): - from ..t5.tokenization_t5_fast import T5TokenizerFast -else: - from ...utils.dummy_tokenizers_objects import T5TokenizerFast - -MT5TokenizerFast = T5TokenizerFast - -_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mt5"] = [ - "MT5EncoderModel", - "MT5ForConditionalGeneration", - "MT5ForQuestionAnswering", - "MT5ForSequenceClassification", - "MT5ForTokenClassification", - "MT5Model", - "MT5PreTrainedModel", - "MT5Stack", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_mt5 import MT5Config, MT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mt5 import ( - MT5EncoderModel, - MT5ForConditionalGeneration, - MT5ForQuestionAnswering, - MT5ForSequenceClassification, - MT5ForTokenClassification, - MT5Model, - MT5PreTrainedModel, - MT5Stack, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model - + from .configuration_mt5 import * + from .modeling_flax_mt5 import * + from .modeling_mt5 import * + from .modeling_tf_mt5 import * + from .tokenization_mt5 import * + from .tokenization_mt5_fast import * else: import sys - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast}, - module_spec=__spec__, - ) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index ef629718b1b5..9d64c5b96a3b 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -172,3 +172,6 @@ def default_onnx_opset(self) -> int: @property def atol_for_validation(self) -> float: return 5e-4 + + +__all__ = ["MT5Config", "MT5OnnxConfig"] 
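mt5 is the one module in this range that could not previously be described by _import_structure alone: MT5Tokenizer and MT5TokenizerFast were aliases of the T5 tokenizers, bound eagerly at import time through is_sentencepiece_available() / is_tokenizers_available() checks and injected via _LazyModule's extra_objects argument. The patch materializes them as real subclasses in two new files (added in the hunks that follow), so the generic define_import_structure scan picks them up like any other symbol. User-facing imports should be unaffected; for example, the following is expected to keep working, with sentencepiece installed:

# MT5Tokenizer is now a trivial T5Tokenizer subclass rather than an alias
# registered through extra_objects; resolution is still lazy.
from transformers import MT5Tokenizer

tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
print(tokenizer.tokenize("A short test sentence."))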
diff --git a/src/transformers/models/mt5/modeling_flax_mt5.py b/src/transformers/models/mt5/modeling_flax_mt5.py index fbb5b107f55e..0797e25e42c4 100644 --- a/src/transformers/models/mt5/modeling_flax_mt5.py +++ b/src/transformers/models/mt5/modeling_flax_mt5.py @@ -118,3 +118,6 @@ class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration): model_type = "mt5" config_class = MT5Config + + +__all__ = ["FlaxMT5Model", "FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration"] diff --git a/src/transformers/models/mt5/modeling_mt5.py b/src/transformers/models/mt5/modeling_mt5.py index 54943cf982dd..75b9336b45f0 100644 --- a/src/transformers/models/mt5/modeling_mt5.py +++ b/src/transformers/models/mt5/modeling_mt5.py @@ -2432,3 +2432,15 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "MT5PreTrainedModel", + "MT5Stack", + "MT5Model", + "MT5ForConditionalGeneration", + "MT5EncoderModel", + "MT5ForSequenceClassification", + "MT5ForTokenClassification", + "MT5ForQuestionAnswering", +] diff --git a/src/transformers/models/mt5/modeling_tf_mt5.py b/src/transformers/models/mt5/modeling_tf_mt5.py index 7270a54948c4..2e427362acd6 100644 --- a/src/transformers/models/mt5/modeling_tf_mt5.py +++ b/src/transformers/models/mt5/modeling_tf_mt5.py @@ -93,3 +93,6 @@ class TFMT5EncoderModel(TFT5EncoderModel): model_type = "mt5" config_class = MT5Config + + +__all__ = ["TFMT5Model", "TFMT5EncoderModel", "TFMT5ForConditionalGeneration"] diff --git a/src/transformers/models/mt5/tokenization_mt5.py b/src/transformers/models/mt5/tokenization_mt5.py new file mode 100644 index 000000000000..3ce7f7d59672 --- /dev/null +++ b/src/transformers/models/mt5/tokenization_mt5.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# Copyright 2020, The T5 Authors and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""mT5 tokenization file""" + +from ...utils.import_utils import export +from ..t5 import T5Tokenizer + + +@export(backends=("sentencepiece",)) +class MT5Tokenizer(T5Tokenizer): + pass + + +__all__ = ["MT5Tokenizer"] diff --git a/src/transformers/models/mt5/tokenization_mt5_fast.py b/src/transformers/models/mt5/tokenization_mt5_fast.py new file mode 100644 index 000000000000..828102022c95 --- /dev/null +++ b/src/transformers/models/mt5/tokenization_mt5_fast.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# Copyright 2020, The T5 Authors and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""mT5 fast tokenization file""" + +from ..t5 import T5TokenizerFast + + +class MT5TokenizerFast(T5TokenizerFast): + pass + + +__all__ = ["MT5TokenizerFast"] diff --git a/src/transformers/models/musicgen/__init__.py b/src/transformers/models/musicgen/__init__.py index 3b03adae12fc..c5d3b68bb4fc 100644 --- a/src/transformers/models/musicgen/__init__.py +++ b/src/transformers/models/musicgen/__init__.py @@ -13,51 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_musicgen": [ - "MusicgenConfig", - "MusicgenDecoderConfig", - ], - "processing_musicgen": ["MusicgenProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_musicgen"] = [ - "MusicgenForConditionalGeneration", - "MusicgenForCausalLM", - "MusicgenModel", - "MusicgenPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_musicgen import ( - MusicgenConfig, - MusicgenDecoderConfig, - ) - from .processing_musicgen import MusicgenProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_musicgen import ( - MusicgenForCausalLM, - MusicgenForConditionalGeneration, - MusicgenModel, - MusicgenPreTrainedModel, - ) - + from .configuration_musicgen import * + from .modeling_musicgen import * + from .processing_musicgen import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/musicgen/configuration_musicgen.py b/src/transformers/models/musicgen/configuration_musicgen.py index ef2e0244c140..78e92672c18e 100644 --- a/src/transformers/models/musicgen/configuration_musicgen.py +++ b/src/transformers/models/musicgen/configuration_musicgen.py @@ -253,3 +253,6 @@ def _attn_implementation(self): def _attn_implementation(self, value): self._attn_implementation_internal = value self.decoder._attn_implementation = value + + +__all__ = ["MusicgenDecoderConfig", "MusicgenConfig"] diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index f720faac038e..fcf3917c3d66 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -2739,3 +2739,12 @@ def get_unconditional_inputs(self, num_samples=1): attention_mask=attention_mask, guidance_scale=1.0, ) + + +__all__ = [ + "MusicgenPreTrainedModel", + "MusicgenDecoder", + "MusicgenModel", + "MusicgenForCausalLM", + "MusicgenForConditionalGeneration", +] diff --git a/src/transformers/models/musicgen/processing_musicgen.py b/src/transformers/models/musicgen/processing_musicgen.py index c153c5dfe1b9..deebf9045b4f 100644 --- a/src/transformers/models/musicgen/processing_musicgen.py +++ b/src/transformers/models/musicgen/processing_musicgen.py @@ -139,3 +139,6 @@ def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np. 
audio_values[i] = sliced_audio.reshape(channels, -1) return audio_values + + +__all__ = ["MusicgenProcessor"] diff --git a/src/transformers/models/musicgen_melody/__init__.py b/src/transformers/models/musicgen_melody/__init__.py index 20c8507aaed7..f7594d668761 100644 --- a/src/transformers/models/musicgen_melody/__init__.py +++ b/src/transformers/models/musicgen_melody/__init__.py @@ -13,74 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_torchaudio_available, -) - - -_import_structure = { - "configuration_musicgen_melody": [ - "MusicgenMelodyConfig", - "MusicgenMelodyDecoderConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_musicgen_melody"] = [ - "MusicgenMelodyForConditionalGeneration", - "MusicgenMelodyForCausalLM", - "MusicgenMelodyModel", - "MusicgenMelodyPreTrainedModel", - ] - -try: - if not is_torchaudio_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_musicgen_melody"] = ["MusicgenMelodyFeatureExtractor"] - _import_structure["processing_musicgen_melody"] = ["MusicgenMelodyProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_musicgen_melody import ( - MusicgenMelodyConfig, - MusicgenMelodyDecoderConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_musicgen_melody import ( - MusicgenMelodyForCausalLM, - MusicgenMelodyForConditionalGeneration, - MusicgenMelodyModel, - MusicgenMelodyPreTrainedModel, - ) - - try: - if not is_torchaudio_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor - from .processing_musicgen_melody import MusicgenMelodyProcessor - - + from .configuration_musicgen_melody import * + from .feature_extraction_musicgen_melody import * + from .modeling_musicgen_melody import * + from .processing_musicgen_melody import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py index b29187facb3d..15d74da1dd90 100644 --- a/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/configuration_musicgen_melody.py @@ -267,3 +267,6 @@ def _attn_implementation(self): def _attn_implementation(self, value): self._attn_implementation_internal = value self.decoder._attn_implementation = value + + +__all__ = ["MusicgenMelodyDecoderConfig", "MusicgenMelodyConfig"] diff --git a/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py b/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py index ac83f3ac8df0..ef5933f4eda6 100644 --- a/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +++ 
b/src/transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py @@ -25,6 +25,7 @@ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, is_torch_available, is_torchaudio_available, logging +from ...utils.import_utils import export if is_torch_available(): @@ -36,6 +37,7 @@ logger = logging.get_logger(__name__) +@export(backends=("torchaudio",)) class MusicgenMelodyFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a MusicgenMelody feature extractor. @@ -329,3 +331,6 @@ def to_dict(self) -> Dict[str, Any]: if "spectrogram" in output: del output["spectrogram"] return output + + +__all__ = ["MusicgenMelodyFeatureExtractor"] diff --git a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py index a8a8fe960989..71e74fa3442b 100644 --- a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py @@ -2575,3 +2575,12 @@ def _update_model_kwargs_for_generation( ) return model_kwargs + + +__all__ = [ + "MusicgenMelodyPreTrainedModel", + "MusicgenMelodyDecoder", + "MusicgenMelodyModel", + "MusicgenMelodyForCausalLM", + "MusicgenMelodyForConditionalGeneration", +] diff --git a/src/transformers/models/musicgen_melody/processing_musicgen_melody.py b/src/transformers/models/musicgen_melody/processing_musicgen_melody.py index 34b1d1ec4d6d..c10b0572d259 100644 --- a/src/transformers/models/musicgen_melody/processing_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/processing_musicgen_melody.py @@ -22,8 +22,10 @@ from ...processing_utils import ProcessorMixin from ...utils import to_numpy +from ...utils.import_utils import export +@export(backends=("torchaudio",)) class MusicgenMelodyProcessor(ProcessorMixin): r""" Constructs a MusicGen Melody processor which wraps a Wav2Vec2 feature extractor - for raw audio waveform processing - and a T5 tokenizer into a single processor @@ -173,3 +175,6 @@ def get_unconditional_inputs(self, num_samples=1, return_tensors="pt"): inputs["attention_mask"][:] = 0 return inputs + + +__all__ = ["MusicgenMelodyProcessor"] diff --git a/src/transformers/models/mvp/__init__.py b/src/transformers/models/mvp/__init__.py index e865b8827c5c..e2d6da46aa9a 100644 --- a/src/transformers/models/mvp/__init__.py +++ b/src/transformers/models/mvp/__init__.py @@ -13,65 +13,17 @@ # limitations under the License. 
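The @export(backends=("torchaudio",)) markers above, like the sentencepiece one on MT5Tokenizer earlier, are how a symbol declares backend requirements when its filename carries no signal: feature extractors and processors have no backend-specific naming convention. The decorator itself is defined in utils/import_utils.py and is not shown in this section; a plausible minimal form, under the assumption that it merely records the requirement for define_import_structure to read later, would be:

from typing import Any, Callable, Tuple


def export(*, backends: Tuple[str, ...] = ()) -> Callable[[Any], Any]:
    """Sketch only: tag an object with the backends it needs. The attribute
    name used for the tag is an assumption, not taken from the patch."""

    def wrapper(obj: Any) -> Any:
        obj.__backends = backends
        return obj

    return wrapper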
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_mvp": ["MvpConfig", "MvpOnnxConfig"], - "tokenization_mvp": ["MvpTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_mvp"] = [ - "MvpForCausalLM", - "MvpForConditionalGeneration", - "MvpForQuestionAnswering", - "MvpForSequenceClassification", - "MvpModel", - "MvpPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_mvp import MvpConfig, MvpOnnxConfig - from .tokenization_mvp import MvpTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_mvp_fast import MvpTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_mvp import ( - MvpForCausalLM, - MvpForConditionalGeneration, - MvpForQuestionAnswering, - MvpForSequenceClassification, - MvpModel, - MvpPreTrainedModel, - ) - + from .configuration_mvp import * + from .modeling_mvp import * + from .tokenization_mvp import * + from .tokenization_mvp_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/mvp/configuration_mvp.py b/src/transformers/models/mvp/configuration_mvp.py index 8e2317982b57..a270461db40d 100644 --- a/src/transformers/models/mvp/configuration_mvp.py +++ b/src/transformers/models/mvp/configuration_mvp.py @@ -178,3 +178,6 @@ def __init__( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " "The config can simply be saved and uploaded again to be fixed." 
) + + +__all__ = ["MvpConfig"] diff --git a/src/transformers/models/mvp/modeling_mvp.py b/src/transformers/models/mvp/modeling_mvp.py index 319f1760cef9..5159dde0e4f0 100644 --- a/src/transformers/models/mvp/modeling_mvp.py +++ b/src/transformers/models/mvp/modeling_mvp.py @@ -2005,3 +2005,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "MvpPreTrainedModel", + "MvpModel", + "MvpForConditionalGeneration", + "MvpForSequenceClassification", + "MvpForQuestionAnswering", + "MvpForCausalLM", +] diff --git a/src/transformers/models/mvp/tokenization_mvp.py b/src/transformers/models/mvp/tokenization_mvp.py index 5a159320b7a3..e3a32082cce8 100644 --- a/src/transformers/models/mvp/tokenization_mvp.py +++ b/src/transformers/models/mvp/tokenization_mvp.py @@ -389,3 +389,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["MvpTokenizer"] diff --git a/src/transformers/models/mvp/tokenization_mvp_fast.py b/src/transformers/models/mvp/tokenization_mvp_fast.py index 5901c2bece40..a66b4e178e8a 100644 --- a/src/transformers/models/mvp/tokenization_mvp_fast.py +++ b/src/transformers/models/mvp/tokenization_mvp_fast.py @@ -277,3 +277,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["MvpTokenizerFast"] diff --git a/src/transformers/models/nllb/__init__.py b/src/transformers/models/nllb/__init__.py index 49e0e5c675ac..ebd78988f2e5 100644 --- a/src/transformers/models/nllb/__init__.py +++ b/src/transformers/models/nllb/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
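A word on the two branches every rewritten __init__.py shares: the TYPE_CHECKING arm exists purely for static analysis, so type checkers and IDEs can follow the star imports (bounded by each file's __all__) without executing anything, while at runtime the else arm replaces the module object in sys.modules with the lazy proxy. Reduced to its skeleton, with a placeholder submodule name standing in for the real files:

from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    # Seen by mypy/pyright only; never executed at runtime.
    from .configuration_placeholder import *
else:
    import sys

    # At runtime, swap this module for a proxy that imports submodules
    # on first attribute access.
    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)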
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nllb"] = ["NllbTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nllb import NllbTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nllb_fast import NllbTokenizerFast - + from .tokenization_nllb import * + from .tokenization_nllb_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nllb/tokenization_nllb.py b/src/transformers/models/nllb/tokenization_nllb.py index b5ae28b81273..d6229ef7536b 100644 --- a/src/transformers/models/nllb/tokenization_nllb.py +++ b/src/transformers/models/nllb/tokenization_nllb.py @@ -21,6 +21,7 @@ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -33,6 +34,7 @@ FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 
'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] # fmt: skip +@export(backends=("sentencepiece",)) class NllbTokenizer(PreTrainedTokenizer): """ Construct an NLLB tokenizer. @@ -387,3 +389,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None: else: self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] + + +__all__ = ["NllbTokenizer"] diff --git a/src/transformers/models/nllb/tokenization_nllb_fast.py b/src/transformers/models/nllb/tokenization_nllb_fast.py index 013dbc97b35d..80b00e342462 100644 --- a/src/transformers/models/nllb/tokenization_nllb_fast.py +++ b/src/transformers/models/nllb/tokenization_nllb_fast.py @@ -326,3 +326,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["NllbTokenizerFast"] diff --git a/src/transformers/models/nllb_moe/__init__.py b/src/transformers/models/nllb_moe/__init__.py index ccb961ba38e8..b2c1b6cb1de9 100644 --- a/src/transformers/models/nllb_moe/__init__.py +++ b/src/transformers/models/nllb_moe/__init__.py @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
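Notice the asymmetry above: NllbTokenizer carries @export(backends=("sentencepiece",)) while NllbTokenizerFast gets only an __all__ entry, just like MT5TokenizerFast earlier. The likely explanation, stated here as an assumption, is that define_import_structure infers requirements from file naming conventions where it can, so only files whose names imply nothing need explicit decorators. A guess at that inference, for illustration only:

from typing import Tuple


def inferred_backends(filename: str) -> Tuple[str, ...]:
    # Hypothetical filename-based rules; the real ones, if any, live in
    # utils/import_utils.py and may differ.
    if filename.startswith("modeling_flax_"):
        return ("flax",)
    if filename.startswith("modeling_tf_"):
        return ("tf",)
    if filename.startswith("modeling_"):
        return ("torch",)
    if filename.startswith("tokenization_") and filename.endswith("_fast.py"):
        return ("tokenizers",)
    return ()  # configuration_*, processing_*, etc.: nothing implied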
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_nllb_moe": ["NllbMoeConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nllb_moe"] = [ - "NllbMoeForConditionalGeneration", - "NllbMoeModel", - "NllbMoePreTrainedModel", - "NllbMoeTop2Router", - "NllbMoeSparseMLP", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nllb_moe import ( - NllbMoeConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nllb_moe import ( - NllbMoeForConditionalGeneration, - NllbMoeModel, - NllbMoePreTrainedModel, - NllbMoeSparseMLP, - NllbMoeTop2Router, - ) - - + from .configuration_nllb_moe import * + from .modeling_nllb_moe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nllb_moe/configuration_nllb_moe.py b/src/transformers/models/nllb_moe/configuration_nllb_moe.py index ef12c199ef4a..c2d7f7f11699 100644 --- a/src/transformers/models/nllb_moe/configuration_nllb_moe.py +++ b/src/transformers/models/nllb_moe/configuration_nllb_moe.py @@ -214,3 +214,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["NllbMoeConfig"] diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index 2bec0fb84dce..62647efc65b8 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -1806,3 +1806,12 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "NllbMoePreTrainedModel", + "NllbMoeModel", + "NllbMoeForConditionalGeneration", + "NllbMoeSparseMLP", + "NllbMoeTop2Router", +] diff --git a/src/transformers/models/nougat/__init__.py b/src/transformers/models/nougat/__init__.py index 3cc8bbddf9e9..9b07d80a638e 100644 --- a/src/transformers/models/nougat/__init__.py +++ b/src/transformers/models/nougat/__init__.py @@ -13,51 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_vision_available - - -_import_structure = { - "processing_nougat": ["NougatProcessor"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_nougat_fast"] = ["NougatTokenizerFast"] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_nougat"] = ["NougatImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .processing_nougat import NougatProcessor - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_nougat_fast import NougatTokenizerFast - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_nougat import NougatImageProcessor - - + from .image_processing_nougat import * + from .processing_nougat import * + from .tokenization_nougat_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nougat/image_processing_nougat.py b/src/transformers/models/nougat/image_processing_nougat.py index 792f4a14325a..a1f742d2233e 100644 --- a/src/transformers/models/nougat/image_processing_nougat.py +++ b/src/transformers/models/nougat/image_processing_nougat.py @@ -41,7 +41,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging -from ...utils.import_utils import is_cv2_available, is_vision_available +from ...utils.import_utils import export, is_cv2_available, is_vision_available logger = logging.get_logger(__name__) @@ -55,6 +55,7 @@ import PIL +@export(backends=("vision",)) class NougatImageProcessor(BaseImageProcessor): r""" Constructs a Nougat image processor. @@ -509,3 +510,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["NougatImageProcessor"] diff --git a/src/transformers/models/nougat/processing_nougat.py b/src/transformers/models/nougat/processing_nougat.py index 8f94c6718ba6..58a13454e862 100644 --- a/src/transformers/models/nougat/processing_nougat.py +++ b/src/transformers/models/nougat/processing_nougat.py @@ -158,3 +158,6 @@ def post_process_generation(self, *args, **kwargs): Please refer to the docstring of this method for more information. 
""" return self.tokenizer.post_process_generation(*args, **kwargs) + + +__all__ = ["NougatProcessor"] diff --git a/src/transformers/models/nougat/tokenization_nougat_fast.py b/src/transformers/models/nougat/tokenization_nougat_fast.py index 0a7eec4ad98a..6a554738717e 100644 --- a/src/transformers/models/nougat/tokenization_nougat_fast.py +++ b/src/transformers/models/nougat/tokenization_nougat_fast.py @@ -624,3 +624,6 @@ def post_process_generation( return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation] else: return self.post_process_single(generation, fix_markdown=fix_markdown) + + +__all__ = ["NougatTokenizerFast"] diff --git a/src/transformers/models/nystromformer/__init__.py b/src/transformers/models/nystromformer/__init__.py index 74f8a620204f..a2550512dc5a 100644 --- a/src/transformers/models/nystromformer/__init__.py +++ b/src/transformers/models/nystromformer/__init__.py @@ -13,53 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_nystromformer": ["NystromformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_nystromformer"] = [ - "NystromformerForMaskedLM", - "NystromformerForMultipleChoice", - "NystromformerForQuestionAnswering", - "NystromformerForSequenceClassification", - "NystromformerForTokenClassification", - "NystromformerLayer", - "NystromformerModel", - "NystromformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_nystromformer import NystromformerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_nystromformer import ( - NystromformerForMaskedLM, - NystromformerForMultipleChoice, - NystromformerForQuestionAnswering, - NystromformerForSequenceClassification, - NystromformerForTokenClassification, - NystromformerLayer, - NystromformerModel, - NystromformerPreTrainedModel, - ) - - + from .configuration_nystromformer import * + from .modeling_nystromformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/nystromformer/configuration_nystromformer.py b/src/transformers/models/nystromformer/configuration_nystromformer.py index e52b02d9f88a..96a48b99fda4 100644 --- a/src/transformers/models/nystromformer/configuration_nystromformer.py +++ b/src/transformers/models/nystromformer/configuration_nystromformer.py @@ -127,3 +127,6 @@ def __init__( self.inv_coeff_init_option = inv_coeff_init_option self.layer_norm_eps = layer_norm_eps super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + +__all__ = ["NystromformerConfig"] diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index 4bb4c33fff62..307956a0c35c 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ 
b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -1110,3 +1110,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "NystromformerPreTrainedModel", + "NystromformerModel", + "NystromformerForMaskedLM", + "NystromformerForSequenceClassification", + "NystromformerForMultipleChoice", + "NystromformerForTokenClassification", + "NystromformerForQuestionAnswering", +] diff --git a/src/transformers/models/olmo/__init__.py b/src/transformers/models/olmo/__init__.py index b94350cd3310..139af5473e41 100644 --- a/src/transformers/models/olmo/__init__.py +++ b/src/transformers/models/olmo/__init__.py @@ -13,47 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_olmo": ["OlmoConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_olmo"] = [ - "OlmoForCausalLM", - "OlmoModel", - "OlmoPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_olmo import OlmoConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_olmo import ( - OlmoForCausalLM, - OlmoModel, - OlmoPreTrainedModel, - ) - + from .configuration_olmo import * + from .modeling_olmo import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/olmo/configuration_olmo.py b/src/transformers/models/olmo/configuration_olmo.py index 77a3b18e364e..d3edb90772f2 100644 --- a/src/transformers/models/olmo/configuration_olmo.py +++ b/src/transformers/models/olmo/configuration_olmo.py @@ -179,3 +179,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["OlmoConfig"] diff --git a/src/transformers/models/olmo/modeling_olmo.py b/src/transformers/models/olmo/modeling_olmo.py index 007e69570e78..ddaaac511d28 100644 --- a/src/transformers/models/olmo/modeling_olmo.py +++ b/src/transformers/models/olmo/modeling_olmo.py @@ -1227,3 +1227,6 @@ def prepare_inputs_for_generation( } ) return model_inputs + + +__all__ = ["OlmoPreTrainedModel", "OlmoModel", "OlmoForCausalLM"] diff --git a/src/transformers/models/oneformer/__init__.py b/src/transformers/models/oneformer/__init__.py index 11ddde65d059..e11c7ee2c8fc 100644 --- a/src/transformers/models/oneformer/__init__.py +++ b/src/transformers/models/oneformer/__init__.py @@ -13,59 +13,17 @@ # limitations under the License. 
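Since the scheme now hinges on every __all__ being accurate, a cheap invariant worth testing (a hypothetical check, not part of this patch) is that each listed name resolves to a real attribute once its module is imported; this guards against typos and against __all__ drifting out of sync with the file:

import importlib


def check_all_resolves(module_name: str) -> None:
    """Assert that every entry in module.__all__ is a real attribute."""
    module = importlib.import_module(module_name)
    missing = [n for n in getattr(module, "__all__", []) if not hasattr(module, n)]
    assert not missing, f"{module_name}: __all__ lists undefined names: {missing}"


# Example: check_all_resolves("transformers.models.olmo.modeling_olmo")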
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_oneformer": ["OneFormerConfig"], - "processing_oneformer": ["OneFormerProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_oneformer"] = ["OneFormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_oneformer"] = [ - "OneFormerForUniversalSegmentation", - "OneFormerModel", - "OneFormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_oneformer import OneFormerConfig - from .processing_oneformer import OneFormerProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_oneformer import OneFormerImageProcessor - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_oneformer import ( - OneFormerForUniversalSegmentation, - OneFormerModel, - OneFormerPreTrainedModel, - ) - - + from .configuration_oneformer import * + from .image_processing_oneformer import * + from .modeling_oneformer import * + from .processing_oneformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/oneformer/configuration_oneformer.py b/src/transformers/models/oneformer/configuration_oneformer.py index 86f56a1f571b..b289032afa24 100644 --- a/src/transformers/models/oneformer/configuration_oneformer.py +++ b/src/transformers/models/oneformer/configuration_oneformer.py @@ -272,3 +272,6 @@ def __init__( self.num_hidden_layers = decoder_layers super().__init__(**kwargs) + + +__all__ = ["OneFormerConfig"] diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py index 1fefddc07b80..947410b41349 100644 --- a/src/transformers/models/oneformer/image_processing_oneformer.py +++ b/src/transformers/models/oneformer/image_processing_oneformer.py @@ -53,6 +53,7 @@ logging, ) from ...utils.deprecation import deprecate_kwarg +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -372,6 +373,7 @@ def load_metadata(repo_id, class_info_file): return class_info +@export(backends=("vision",)) class OneFormerImageProcessor(BaseImageProcessor): r""" Constructs a OneFormer image processor. 
The image processor can be used to prepare image(s), task input(s) and @@ -1351,3 +1353,6 @@ def post_process_panoptic_segmentation( results.append({"segmentation": segmentation, "segments_info": segments}) return results + + +__all__ = ["OneFormerImageProcessor"] diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index 9c2f66220715..4270c190c339 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -3255,3 +3255,6 @@ def forward( if loss is not None: output = (loss) + output return output + + +__all__ = ["OneFormerPreTrainedModel", "OneFormerModel", "OneFormerForUniversalSegmentation"] diff --git a/src/transformers/models/oneformer/processing_oneformer.py b/src/transformers/models/oneformer/processing_oneformer.py index 9e55be5d6731..78fef3283cd8 100644 --- a/src/transformers/models/oneformer/processing_oneformer.py +++ b/src/transformers/models/oneformer/processing_oneformer.py @@ -202,3 +202,6 @@ def post_process_panoptic_segmentation(self, *args, **kwargs): Please refer to the docstring of this method for more information. """ return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs) + + +__all__ = ["OneFormerProcessor"] diff --git a/src/transformers/models/openai/__init__.py b/src/transformers/models/openai/__init__.py index af4ebbfee663..0a38ea5b1b53 100644 --- a/src/transformers/models/openai/__init__.py +++ b/src/transformers/models/openai/__init__.py @@ -11,105 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_openai": ["OpenAIGPTConfig"], - "tokenization_openai": ["OpenAIGPTTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_openai_fast"] = ["OpenAIGPTTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_openai"] = [ - "OpenAIGPTDoubleHeadsModel", - "OpenAIGPTForSequenceClassification", - "OpenAIGPTLMHeadModel", - "OpenAIGPTModel", - "OpenAIGPTPreTrainedModel", - "load_tf_weights_in_openai_gpt", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_openai"] = [ - "TFOpenAIGPTDoubleHeadsModel", - "TFOpenAIGPTForSequenceClassification", - "TFOpenAIGPTLMHeadModel", - "TFOpenAIGPTMainLayer", - "TFOpenAIGPTModel", - "TFOpenAIGPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_openai import OpenAIGPTConfig - from .tokenization_openai import OpenAIGPTTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_openai_fast import OpenAIGPTTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - 
pass - else: - from .modeling_openai import ( - OpenAIGPTDoubleHeadsModel, - OpenAIGPTForSequenceClassification, - OpenAIGPTLMHeadModel, - OpenAIGPTModel, - OpenAIGPTPreTrainedModel, - load_tf_weights_in_openai_gpt, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_openai import ( - TFOpenAIGPTDoubleHeadsModel, - TFOpenAIGPTForSequenceClassification, - TFOpenAIGPTLMHeadModel, - TFOpenAIGPTMainLayer, - TFOpenAIGPTModel, - TFOpenAIGPTPreTrainedModel, - ) - + from .configuration_openai import * + from .modeling_openai import * + from .modeling_tf_openai import * + from .tokenization_openai import * + from .tokenization_openai_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/openai/configuration_openai.py b/src/transformers/models/openai/configuration_openai.py index dde668b32f7d..b4f2fae9d304 100644 --- a/src/transformers/models/openai/configuration_openai.py +++ b/src/transformers/models/openai/configuration_openai.py @@ -151,3 +151,6 @@ def __init__( self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels super().__init__(**kwargs) + + +__all__ = ["OpenAIGPTConfig"] diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 2b24850f3f0c..7d90ad78cabd 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -853,3 +853,13 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_openai_gpt", + "OpenAIGPTPreTrainedModel", + "OpenAIGPTModel", + "OpenAIGPTLMHeadModel", + "OpenAIGPTDoubleHeadsModel", + "OpenAIGPTForSequenceClassification", +] diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 0f911c1245f7..41223fb618c8 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -935,3 +935,13 @@ def build(self, input_shape=None): if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) + + +__all__ = [ + "TFOpenAIGPTPreTrainedModel", + "TFOpenAIGPTModel", + "TFOpenAIGPTLMHeadModel", + "TFOpenAIGPTDoubleHeadsModel", + "TFOpenAIGPTForSequenceClassification", + "TFOpenAIGPTMainLayer", +] diff --git a/src/transformers/models/openai/tokenization_openai.py b/src/transformers/models/openai/tokenization_openai.py index 091dc5697314..cbfb41fc888f 100644 --- a/src/transformers/models/openai/tokenization_openai.py +++ b/src/transformers/models/openai/tokenization_openai.py @@ -391,3 +391,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = index += 1 return vocab_file, merge_file + + +__all__ = ["OpenAIGPTTokenizer"] diff --git a/src/transformers/models/openai/tokenization_openai_fast.py b/src/transformers/models/openai/tokenization_openai_fast.py index 41f4c8db9061..c17d7d29b7dd 100644 --- a/src/transformers/models/openai/tokenization_openai_fast.py +++ b/src/transformers/models/openai/tokenization_openai_fast.py @@ -61,3 +61,6 @@ def 
do_lower_case(self): def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["OpenAIGPTTokenizerFast"] diff --git a/src/transformers/models/opt/__init__.py b/src/transformers/models/opt/__init__.py index 5ae39344b2ff..326968370c04 100644 --- a/src/transformers/models/opt/__init__.py +++ b/src/transformers/models/opt/__init__.py @@ -13,87 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_opt": ["OPTConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_opt"] = [ - "OPTForCausalLM", - "OPTModel", - "OPTPreTrainedModel", - "OPTForSequenceClassification", - "OPTForQuestionAnswering", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_opt"] = [ - "FlaxOPTForCausalLM", - "FlaxOPTModel", - "FlaxOPTPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_opt import OPTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_opt import ( - OPTForCausalLM, - OPTForQuestionAnswering, - OPTForSequenceClassification, - OPTModel, - OPTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel - + from .configuration_opt import * + from .modeling_flax_opt import * + from .modeling_opt import * + from .modeling_tf_opt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/opt/configuration_opt.py b/src/transformers/models/opt/configuration_opt.py index 455a6362a725..8f8838ad9ef1 100644 --- a/src/transformers/models/opt/configuration_opt.py +++ b/src/transformers/models/opt/configuration_opt.py @@ -141,3 +141,6 @@ def __init__( # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 self._remove_final_layer_norm = _remove_final_layer_norm + + +__all__ = ["OPTConfig"] diff --git a/src/transformers/models/opt/modeling_flax_opt.py b/src/transformers/models/opt/modeling_flax_opt.py index c6296e4eeae0..f1c688b9043d 100644 --- 
a/src/transformers/models/opt/modeling_flax_opt.py +++ b/src/transformers/models/opt/modeling_flax_opt.py @@ -797,3 +797,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxBaseModelOutput, _CONFIG_FOR_DOC, ) + +__all__ = ["FlaxOPTPreTrainedModel", "FlaxOPTModel", "FlaxOPTForCausalLM"] diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 8f058171778e..4f2223833fd3 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -1345,3 +1345,12 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value + + +__all__ = [ + "OPTPreTrainedModel", + "OPTModel", + "OPTForCausalLM", + "OPTForSequenceClassification", + "OPTForQuestionAnswering", +] diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index 9c5dfa4ade61..1ab3d7d8b366 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -1092,3 +1092,6 @@ def build(self, input_shape=None): if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) + + +__all__ = ["TFOPTPreTrainedModel", "TFOPTModel", "TFOPTForCausalLM", "TFOPTMainLayer"] diff --git a/src/transformers/models/owlv2/__init__.py b/src/transformers/models/owlv2/__init__.py index 83d432766d69..af0682412cbe 100644 --- a/src/transformers/models/owlv2/__init__.py +++ b/src/transformers/models/owlv2/__init__.py @@ -13,77 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_owlv2": [ - "Owlv2Config", - "Owlv2TextConfig", - "Owlv2VisionConfig", - ], - "processing_owlv2": ["Owlv2Processor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_owlv2"] = ["Owlv2ImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_owlv2"] = [ - "Owlv2Model", - "Owlv2PreTrainedModel", - "Owlv2TextModel", - "Owlv2VisionModel", - "Owlv2ForObjectDetection", - ] - if TYPE_CHECKING: - from .configuration_owlv2 import ( - Owlv2Config, - Owlv2TextConfig, - Owlv2VisionConfig, - ) - from .processing_owlv2 import Owlv2Processor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_owlv2 import Owlv2ImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_owlv2 import ( - Owlv2ForObjectDetection, - Owlv2Model, - Owlv2PreTrainedModel, - Owlv2TextModel, - Owlv2VisionModel, - ) - + from .configuration_owlv2 import * + from .image_processing_owlv2 import * + from .modeling_owlv2 import * + from .processing_owlv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index 43019553c5c6..fbcea3e62be2 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -332,3 +332,6 @@ def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwar config_dict["vision_config"] = vision_config return cls.from_dict(config_dict, **kwargs) + + +__all__ = ["Owlv2TextConfig", "Owlv2VisionConfig", "Owlv2Config"] diff --git a/src/transformers/models/owlv2/image_processing_owlv2.py b/src/transformers/models/owlv2/image_processing_owlv2.py index dd32dc9f1411..c76dd61dcd76 100644 --- a/src/transformers/models/owlv2/image_processing_owlv2.py +++ b/src/transformers/models/owlv2/image_processing_owlv2.py @@ -48,6 +48,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_torch_available(): @@ -175,6 +176,7 @@ def _clip_warp_output(input_image, output_image): return output_image +@export(backends=("vision",)) class Owlv2ImageProcessor(BaseImageProcessor): r""" Constructs an OWLv2 image processor. @@ -607,3 +609,6 @@ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_thresh results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results + + +__all__ = ["Owlv2ImageProcessor"] diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index bc6735ff86b5..4f120a268f1f 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1733,3 +1733,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["Owlv2PreTrainedModel", "Owlv2TextModel", "Owlv2VisionModel", "Owlv2Model", "Owlv2ForObjectDetection"] diff --git a/src/transformers/models/owlv2/processing_owlv2.py b/src/transformers/models/owlv2/processing_owlv2.py index 8b580ca50266..4a0b5a712e9d 100644 --- a/src/transformers/models/owlv2/processing_owlv2.py +++ b/src/transformers/models/owlv2/processing_owlv2.py @@ -188,3 +188,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["Owlv2Processor"] diff --git a/src/transformers/models/owlvit/__init__.py b/src/transformers/models/owlvit/__init__.py index a6da47da9a0f..0c98de327b9b 100644 --- a/src/transformers/models/owlvit/__init__.py +++ b/src/transformers/models/owlvit/__init__.py @@ -13,84 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_owlvit": [ - "OwlViTConfig", - "OwlViTOnnxConfig", - "OwlViTTextConfig", - "OwlViTVisionConfig", - ], - "processing_owlvit": ["OwlViTProcessor"], -} - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"] - _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_owlvit"] = [ - "OwlViTModel", - "OwlViTPreTrainedModel", - "OwlViTTextModel", - "OwlViTVisionModel", - "OwlViTForObjectDetection", - ] - if TYPE_CHECKING: - from .configuration_owlvit import ( - OwlViTConfig, - OwlViTOnnxConfig, - OwlViTTextConfig, - OwlViTVisionConfig, - ) - from .processing_owlvit import OwlViTProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_owlvit import OwlViTFeatureExtractor - from .image_processing_owlvit import OwlViTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_owlvit import ( - OwlViTForObjectDetection, - OwlViTModel, - OwlViTPreTrainedModel, - OwlViTTextModel, - OwlViTVisionModel, - ) - + from .configuration_owlvit import * + from .feature_extraction_owlvit import * + from .image_processing_owlvit import * + from .modeling_owlvit import * + from .processing_owlvit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index 877b348f32c1..b3311ddfea8b 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -378,3 +378,6 @@ def generate_dummy_inputs( @property def default_onnx_opset(self) -> int: return 14 + + +__all__ = ["OwlViTTextConfig", "OwlViTVisionConfig", "OwlViTConfig", "OwlViTOnnxConfig"] diff --git a/src/transformers/models/owlvit/feature_extraction_owlvit.py b/src/transformers/models/owlvit/feature_extraction_owlvit.py index f85fd7f31ea4..e930edf4e295 100644 --- a/src/transformers/models/owlvit/feature_extraction_owlvit.py +++ b/src/transformers/models/owlvit/feature_extraction_owlvit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_owlvit import OwlViTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class OwlViTFeatureExtractor(OwlViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = 
["OwlViTFeatureExtractor"] diff --git a/src/transformers/models/owlvit/image_processing_owlvit.py b/src/transformers/models/owlvit/image_processing_owlvit.py index 63c2d6089559..1fda907f72c0 100644 --- a/src/transformers/models/owlvit/image_processing_owlvit.py +++ b/src/transformers/models/owlvit/image_processing_owlvit.py @@ -41,6 +41,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, logging +from ...utils.import_utils import export if is_torch_available(): @@ -89,6 +90,7 @@ def box_iou(boxes1, boxes2): return iou, union +@export(backends=("vision",)) class OwlViTImageProcessor(BaseImageProcessor): r""" Constructs an OWL-ViT image processor. @@ -596,3 +598,6 @@ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_thresh results.append({"scores": box_scores, "labels": None, "boxes": boxes}) return results + + +__all__ = ["OwlViTImageProcessor"] diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 94b815985878..aff842f5a242 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -1670,3 +1670,6 @@ def forward( text_model_output=text_outputs, vision_model_output=vision_outputs, ) + + +__all__ = ["OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTModel", "OwlViTForObjectDetection"] diff --git a/src/transformers/models/owlvit/processing_owlvit.py b/src/transformers/models/owlvit/processing_owlvit.py index 2c7d490104bd..49e913a384eb 100644 --- a/src/transformers/models/owlvit/processing_owlvit.py +++ b/src/transformers/models/owlvit/processing_owlvit.py @@ -222,3 +222,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["OwlViTProcessor"] diff --git a/src/transformers/models/patchtsmixer/__init__.py b/src/transformers/models/patchtsmixer/__init__.py index b227ca1655c4..2c0299dcb670 100644 --- a/src/transformers/models/patchtsmixer/__init__.py +++ b/src/transformers/models/patchtsmixer/__init__.py @@ -13,51 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_patchtsmixer": ["PatchTSMixerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_patchtsmixer"] = [ - "PatchTSMixerPreTrainedModel", - "PatchTSMixerModel", - "PatchTSMixerForPretraining", - "PatchTSMixerForPrediction", - "PatchTSMixerForTimeSeriesClassification", - "PatchTSMixerForRegression", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_patchtsmixer import ( - PatchTSMixerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_patchtsmixer import ( - PatchTSMixerForPrediction, - PatchTSMixerForPretraining, - PatchTSMixerForRegression, - PatchTSMixerForTimeSeriesClassification, - PatchTSMixerModel, - PatchTSMixerPreTrainedModel, - ) - + from .configuration_patchtsmixer import * + from .modeling_patchtsmixer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py index 10089a3fef6e..83f374651a7c 100644 --- a/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py @@ -230,3 +230,6 @@ def __init__( self.unmasked_channel_indices = unmasked_channel_indices self.norm_eps = norm_eps super().__init__(**kwargs) + + +__all__ = ["PatchTSMixerConfig"] diff --git a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py index 209975b65e8f..37b764160616 100644 --- a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py @@ -2170,3 +2170,13 @@ def generate( # [batch_size x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples) + + +__all__ = [ + "PatchTSMixerPreTrainedModel", + "PatchTSMixerModel", + "PatchTSMixerForPretraining", + "PatchTSMixerForPrediction", + "PatchTSMixerForTimeSeriesClassification", + "PatchTSMixerForRegression", +] diff --git a/src/transformers/models/patchtst/__init__.py b/src/transformers/models/patchtst/__init__.py index 5ba6316505af..3b03211e5bfe 100644 --- a/src/transformers/models/patchtst/__init__.py +++ b/src/transformers/models/patchtst/__init__.py @@ -13,49 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_patchtst": ["PatchTSTConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_patchtst"] = [ - "PatchTSTModel", - "PatchTSTPreTrainedModel", - "PatchTSTForPrediction", - "PatchTSTForPretraining", - "PatchTSTForRegression", - "PatchTSTForClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_patchtst import PatchTSTConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_patchtst import ( - PatchTSTForClassification, - PatchTSTForPrediction, - PatchTSTForPretraining, - PatchTSTForRegression, - PatchTSTModel, - PatchTSTPreTrainedModel, - ) - + from .configuration_patchtst import * + from .modeling_patchtst import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/patchtst/configuration_patchtst.py b/src/transformers/models/patchtst/configuration_patchtst.py index 29d14491752c..be19f2383acd 100644 --- a/src/transformers/models/patchtst/configuration_patchtst.py +++ b/src/transformers/models/patchtst/configuration_patchtst.py @@ -251,3 +251,6 @@ def __init__( self.output_range = output_range super().__init__(**kwargs) + + +__all__ = ["PatchTSTConfig"] diff --git a/src/transformers/models/patchtst/modeling_patchtst.py b/src/transformers/models/patchtst/modeling_patchtst.py index 3c761bcae77a..ed50b9c4ce9c 100755 --- a/src/transformers/models/patchtst/modeling_patchtst.py +++ b/src/transformers/models/patchtst/modeling_patchtst.py @@ -2030,3 +2030,13 @@ def generate( # samples: [bs x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSTOutput(sequences=samples) + + +__all__ = [ + "PatchTSTPreTrainedModel", + "PatchTSTModel", + "PatchTSTForPretraining", + "PatchTSTForClassification", + "PatchTSTForPrediction", + "PatchTSTForRegression", +] diff --git a/src/transformers/models/pegasus/__init__.py b/src/transformers/models/pegasus/__init__.py index 15ac3b56cff0..0e85834b10ea 100644 --- a/src/transformers/models/pegasus/__init__.py +++ b/src/transformers/models/pegasus/__init__.py @@ -13,126 +13,19 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_pegasus": ["PegasusConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pegasus"] = ["PegasusTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pegasus_fast"] = ["PegasusTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pegasus"] = [ - "PegasusForCausalLM", - "PegasusForConditionalGeneration", - "PegasusModel", - "PegasusPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_pegasus"] = [ - "TFPegasusForConditionalGeneration", - "TFPegasusModel", - "TFPegasusPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_pegasus"] = [ - "FlaxPegasusForConditionalGeneration", - "FlaxPegasusModel", - "FlaxPegasusPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pegasus import PegasusConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pegasus import PegasusTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pegasus_fast import PegasusTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pegasus import ( - PegasusForCausalLM, - PegasusForConditionalGeneration, - PegasusModel, - PegasusPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_pegasus import ( - FlaxPegasusForConditionalGeneration, - FlaxPegasusModel, - FlaxPegasusPreTrainedModel, - ) - + from .configuration_pegasus import * + from .modeling_flax_pegasus import * + from .modeling_pegasus import * + from .modeling_tf_pegasus import * + from .tokenization_pegasus import * + from .tokenization_pegasus_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pegasus/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py index 
2cc49857f3c9..23bff5d7719f 100644 --- a/src/transformers/models/pegasus/configuration_pegasus.py +++ b/src/transformers/models/pegasus/configuration_pegasus.py @@ -159,3 +159,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["PegasusConfig"] diff --git a/src/transformers/models/pegasus/modeling_flax_pegasus.py b/src/transformers/models/pegasus/modeling_flax_pegasus.py index e50fc1710c6a..5ac3535520dd 100644 --- a/src/transformers/models/pegasus/modeling_flax_pegasus.py +++ b/src/transformers/models/pegasus/modeling_flax_pegasus.py @@ -1527,3 +1527,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxPegasusForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = ["FlaxPegasusPreTrainedModel", "FlaxPegasusModel", "FlaxPegasusForConditionalGeneration"] diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 42cef3a63558..20c33aa252ee 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -1698,3 +1698,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["PegasusPreTrainedModel", "PegasusModel", "PegasusForConditionalGeneration", "PegasusForCausalLM"] diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 45e9fdbbed75..253296cc3e61 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -1569,3 +1569,6 @@ def build(self, input_shape=None): if getattr(self, "bias_layer", None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) + + +__all__ = ["TFPegasusPreTrainedModel", "TFPegasusModel", "TFPegasusForConditionalGeneration", "TFPegasusMainLayer"] diff --git a/src/transformers/models/pegasus/tokenization_pegasus.py b/src/transformers/models/pegasus/tokenization_pegasus.py index 2763b739a964..a6a746ea08d4 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus.py +++ b/src/transformers/models/pegasus/tokenization_pegasus.py @@ -20,6 +20,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export SPIECE_UNDERLINE = "▁" @@ -31,6 +32,7 @@ # TODO ArthurZ refactor this to only use the added_tokens_encoder +@export(backends=("sentencepiece",)) class PegasusTokenizer(PreTrainedTokenizer): r""" Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). 
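The @export(backends=("sentencepiece",)) marker added in the hunk above is what replaces the hand-written dependency ladders this patch deletes from every __init__.py: instead of listing PegasusTokenizer under an is_sentencepiece_available() branch, the class itself now declares which backends it needs, and define_import_structure picks that declaration up. A minimal sketch of the idea follows, assuming the decorator only records metadata on the decorated object; the actual export helper is added in src/transformers/utils/import_utils.py earlier in this patch and its internals may differ.

from typing import Callable, Tuple


def export(*, backends: Tuple[str, ...] = ()) -> Callable:
    """Record the backends an object needs; checks happen when the import structure is built."""

    def decorator(obj):
        obj.backends = backends  # e.g. ("sentencepiece",) for a tokenizer class
        return obj

    return decorator


@export(backends=("sentencepiece",))
class DummyTokenizer:  # hypothetical stand-in, not the real PegasusTokenizer
    ...


assert DummyTokenizer.backends == ("sentencepiece",)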
@@ -283,3 +285,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["PegasusTokenizer"] diff --git a/src/transformers/models/pegasus/tokenization_pegasus_fast.py b/src/transformers/models/pegasus/tokenization_pegasus_fast.py index 11ccb1ff4a15..af62976cb751 100644 --- a/src/transformers/models/pegasus/tokenization_pegasus_fast.py +++ b/src/transformers/models/pegasus/tokenization_pegasus_fast.py @@ -214,3 +214,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["PegasusTokenizerFast"] diff --git a/src/transformers/models/pegasus_x/__init__.py b/src/transformers/models/pegasus_x/__init__.py index ce26210d3bc6..c3e75207c59f 100644 --- a/src/transformers/models/pegasus_x/__init__.py +++ b/src/transformers/models/pegasus_x/__init__.py @@ -13,43 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_pegasus_x": ["PegasusXConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pegasus_x"] = [ - "PegasusXForConditionalGeneration", - "PegasusXModel", - "PegasusXPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pegasus_x import PegasusXConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pegasus_x import ( - PegasusXForConditionalGeneration, - PegasusXModel, - PegasusXPreTrainedModel, - ) - - + from .configuration_pegasus_x import * + from .modeling_pegasus_x import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pegasus_x/configuration_pegasus_x.py b/src/transformers/models/pegasus_x/configuration_pegasus_x.py index b84c79656ef0..c92f5662b599 100644 --- a/src/transformers/models/pegasus_x/configuration_pegasus_x.py +++ b/src/transformers/models/pegasus_x/configuration_pegasus_x.py @@ -172,3 +172,6 @@ def num_attention_heads(self) -> int: @property def hidden_size(self) -> int: return self.d_model + + +__all__ = ["PegasusXConfig"] diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py index 6d9072777bf6..730a3c9888ce 100755 --- a/src/transformers/models/pegasus_x/modeling_pegasus_x.py +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -1646,3 +1646,6 @@ def __init__(self, config): def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) + + +__all__ = ["PegasusXPreTrainedModel", "PegasusXModel", "PegasusXForConditionalGeneration"] diff --git a/src/transformers/models/perceiver/__init__.py b/src/transformers/models/perceiver/__init__.py index 5cc52d619772..8f1e846d28af 100644 --- a/src/transformers/models/perceiver/__init__.py +++ b/src/transformers/models/perceiver/__init__.py @@ -13,82 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_perceiver": ["PerceiverConfig", "PerceiverOnnxConfig"], - "tokenization_perceiver": ["PerceiverTokenizer"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"] - _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_perceiver"] = [ - "PerceiverForImageClassificationConvProcessing", - "PerceiverForImageClassificationFourier", - "PerceiverForImageClassificationLearned", - "PerceiverForMaskedLM", - "PerceiverForMultimodalAutoencoding", - "PerceiverForOpticalFlow", - "PerceiverForSequenceClassification", - "PerceiverLayer", - "PerceiverModel", - "PerceiverPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_perceiver import PerceiverConfig, PerceiverOnnxConfig - from .tokenization_perceiver import PerceiverTokenizer - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_perceiver import PerceiverFeatureExtractor - from .image_processing_perceiver import PerceiverImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_perceiver import ( - PerceiverForImageClassificationConvProcessing, - PerceiverForImageClassificationFourier, - PerceiverForImageClassificationLearned, - PerceiverForMaskedLM, - PerceiverForMultimodalAutoencoding, - PerceiverForOpticalFlow, - PerceiverForSequenceClassification, - PerceiverLayer, - PerceiverModel, - PerceiverPreTrainedModel, - ) - + from .configuration_perceiver import * + from .feature_extraction_perceiver import * + from .image_processing_perceiver import * + from .modeling_perceiver import * + from .tokenization_perceiver import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/perceiver/configuration_perceiver.py b/src/transformers/models/perceiver/configuration_perceiver.py index e2c9cca4c30f..32e1f885be83 100644 --- a/src/transformers/models/perceiver/configuration_perceiver.py +++ b/src/transformers/models/perceiver/configuration_perceiver.py @@ -212,7 +212,6 @@ def generate_dummy_inputs( image_height: int = 40, ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified - if isinstance(preprocessor, PreTrainedTokenizerBase): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( @@ -239,3 +238,6 @@ def generate_dummy_inputs( raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." 
) + + +__all__ = ["PerceiverConfig", "PerceiverOnnxConfig"] diff --git a/src/transformers/models/perceiver/feature_extraction_perceiver.py b/src/transformers/models/perceiver/feature_extraction_perceiver.py index 35f2a6c5c9e7..4a11167212b4 100644 --- a/src/transformers/models/perceiver/feature_extraction_perceiver.py +++ b/src/transformers/models/perceiver/feature_extraction_perceiver.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_perceiver import PerceiverImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class PerceiverFeatureExtractor(PerceiverImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["PerceiverFeatureExtractor"] diff --git a/src/transformers/models/perceiver/image_processing_perceiver.py b/src/transformers/models/perceiver/image_processing_perceiver.py index faacc873b9b0..25f448e03c91 100644 --- a/src/transformers/models/perceiver/image_processing_perceiver.py +++ b/src/transformers/models/perceiver/image_processing_perceiver.py @@ -35,6 +35,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -44,6 +45,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class PerceiverImageProcessor(BaseImageProcessor): r""" Constructs a Perceiver image processor. @@ -346,3 +348,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PerceiverImageProcessor"] diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index b6c233c76112..9a86d053b0c4 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -3499,3 +3499,16 @@ def forward( final_inputs = torch.cat(padded_ls, dim=1) return final_inputs, modality_sizes, inputs_without_pos + + +__all__ = [ + "PerceiverPreTrainedModel", + "PerceiverModel", + "PerceiverForMaskedLM", + "PerceiverForSequenceClassification", + "PerceiverForImageClassificationLearned", + "PerceiverForImageClassificationFourier", + "PerceiverForImageClassificationConvProcessing", + "PerceiverForOpticalFlow", + "PerceiverForMultimodalAutoencoding", +] diff --git a/src/transformers/models/perceiver/tokenization_perceiver.py b/src/transformers/models/perceiver/tokenization_perceiver.py index 90686b78dce0..2a7fe6f43c6d 100644 --- a/src/transformers/models/perceiver/tokenization_perceiver.py +++ b/src/transformers/models/perceiver/tokenization_perceiver.py @@ -195,3 +195,6 @@ def convert_tokens_to_string(self, tokens): # PerceiverTokenizer has no vocab file def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: return () + + +__all__ = ["PerceiverTokenizer"] diff --git a/src/transformers/models/persimmon/__init__.py b/src/transformers/models/persimmon/__init__.py index e1f24ca1b7c2..028038367b16 100644 --- a/src/transformers/models/persimmon/__init__.py +++ b/src/transformers/models/persimmon/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_persimmon": ["PersimmonConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_persimmon"] = [ - "PersimmonForCausalLM", - "PersimmonModel", - "PersimmonPreTrainedModel", - "PersimmonForSequenceClassification", - "PersimmonForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_persimmon import PersimmonConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_persimmon import ( - PersimmonForCausalLM, - PersimmonForSequenceClassification, - PersimmonForTokenClassification, - PersimmonModel, - PersimmonPreTrainedModel, - ) - - + from .configuration_persimmon import * + from .modeling_persimmon import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/persimmon/configuration_persimmon.py b/src/transformers/models/persimmon/configuration_persimmon.py index 11f4c66d73e6..20e9ec8c2abb 100644 --- a/src/transformers/models/persimmon/configuration_persimmon.py +++ b/src/transformers/models/persimmon/configuration_persimmon.py @@ -157,3 +157,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["PersimmonConfig"] diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index df16557423d7..e499d8db46df 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -1246,3 +1246,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "PersimmonPreTrainedModel", + "PersimmonModel", + "PersimmonForCausalLM", + "PersimmonForSequenceClassification", + "PersimmonForTokenClassification", +] diff --git a/src/transformers/models/phi/__init__.py b/src/transformers/models/phi/__init__.py index 662c0a9bf348..cffe33da73ee 100644 --- a/src/transformers/models/phi/__init__.py +++ b/src/transformers/models/phi/__init__.py @@ -11,57 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_phi": ["PhiConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_phi"] = [ - "PhiPreTrainedModel", - "PhiModel", - "PhiForCausalLM", - "PhiForSequenceClassification", - "PhiForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_phi import PhiConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_phi import ( - PhiForCausalLM, - PhiForSequenceClassification, - PhiForTokenClassification, - PhiModel, - PhiPreTrainedModel, - ) - - + from .configuration_phi import * + from .modeling_phi import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phi/configuration_phi.py b/src/transformers/models/phi/configuration_phi.py index e54d400ae6e7..aca8f4e2d59c 100644 --- a/src/transformers/models/phi/configuration_phi.py +++ b/src/transformers/models/phi/configuration_phi.py @@ -184,3 +184,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["PhiConfig"] diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py index a3039a5aa14a..7e9d855df56b 100644 --- a/src/transformers/models/phi/modeling_phi.py +++ b/src/transformers/models/phi/modeling_phi.py @@ -1543,3 +1543,12 @@ def forward( hidden_states=model_outputs.hidden_states, attentions=model_outputs.attentions, ) + + +__all__ = [ + "PhiPreTrainedModel", + "PhiModel", + "PhiForCausalLM", + "PhiForSequenceClassification", + "PhiForTokenClassification", +] diff --git a/src/transformers/models/phi3/__init__.py b/src/transformers/models/phi3/__init__.py index bfe766dfac9f..a3326fcbe556 100644 --- a/src/transformers/models/phi3/__init__.py +++ b/src/transformers/models/phi3/__init__.py @@ -11,57 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_phi3": ["Phi3Config"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_phi3"] = [ - "Phi3PreTrainedModel", - "Phi3Model", - "Phi3ForCausalLM", - "Phi3ForSequenceClassification", - "Phi3ForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_phi3 import Phi3Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_phi3 import ( - Phi3ForCausalLM, - Phi3ForSequenceClassification, - Phi3ForTokenClassification, - Phi3Model, - Phi3PreTrainedModel, - ) - - + from .configuration_phi3 import * + from .modeling_phi3 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phi3/configuration_phi3.py b/src/transformers/models/phi3/configuration_phi3.py index 4940f43e5bff..361c43c99eca 100644 --- a/src/transformers/models/phi3/configuration_phi3.py +++ b/src/transformers/models/phi3/configuration_phi3.py @@ -219,3 +219,6 @@ def _rope_scaling_validation(self): raise ValueError( f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}" ) + + +__all__ = ["Phi3Config"] diff --git a/src/transformers/models/phi3/modeling_phi3.py b/src/transformers/models/phi3/modeling_phi3.py index f021c6ce2d33..511eebb64801 100644 --- a/src/transformers/models/phi3/modeling_phi3.py +++ b/src/transformers/models/phi3/modeling_phi3.py @@ -1583,3 +1583,12 @@ def forward( hidden_states=model_outputs.hidden_states, attentions=model_outputs.attentions, ) + + +__all__ = [ + "Phi3PreTrainedModel", + "Phi3Model", + "Phi3ForCausalLM", + "Phi3ForSequenceClassification", + "Phi3ForTokenClassification", +] diff --git a/src/transformers/models/phobert/__init__.py b/src/transformers/models/phobert/__init__.py index c974d994eca0..b4e6162db09c 100644 --- a/src/transformers/models/phobert/__init__.py +++ b/src/transformers/models/phobert/__init__.py @@ -11,19 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_phobert": ["PhobertTokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_phobert import PhobertTokenizer - + from .tokenization_phobert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/phobert/tokenization_phobert.py b/src/transformers/models/phobert/tokenization_phobert.py index 85450f4d8e26..471b3a89f853 100644 --- a/src/transformers/models/phobert/tokenization_phobert.py +++ b/src/transformers/models/phobert/tokenization_phobert.py @@ -346,3 +346,6 @@ def add_from_file(self, f): raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'") word = line[:idx] self.encoder[word] = len(self.encoder) + + +__all__ = ["PhobertTokenizer"] diff --git a/src/transformers/models/pix2struct/__init__.py b/src/transformers/models/pix2struct/__init__.py index 581d5d7240c6..70476605c94e 100644 --- a/src/transformers/models/pix2struct/__init__.py +++ b/src/transformers/models/pix2struct/__init__.py @@ -13,70 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_pix2struct": [ - "Pix2StructConfig", - "Pix2StructTextConfig", - "Pix2StructVisionConfig", - ], - "processing_pix2struct": ["Pix2StructProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pix2struct"] = [ - "Pix2StructPreTrainedModel", - "Pix2StructForConditionalGeneration", - "Pix2StructVisionModel", - "Pix2StructTextModel", - ] - if TYPE_CHECKING: - from .configuration_pix2struct import ( - Pix2StructConfig, - Pix2StructTextConfig, - Pix2StructVisionConfig, - ) - from .processing_pix2struct import Pix2StructProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pix2struct import Pix2StructImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pix2struct import ( - Pix2StructForConditionalGeneration, - Pix2StructPreTrainedModel, - Pix2StructTextModel, - Pix2StructVisionModel, - ) - + from .configuration_pix2struct import * + from .image_processing_pix2struct import * + from .modeling_pix2struct import * + from .processing_pix2struct import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pix2struct/configuration_pix2struct.py
b/src/transformers/models/pix2struct/configuration_pix2struct.py index d74bb84ce6ab..4ab0c46b8de6 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -382,3 +382,6 @@ def from_text_vision_configs( """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["Pix2StructTextConfig", "Pix2StructVisionConfig", "Pix2StructConfig"] diff --git a/src/transformers/models/pix2struct/image_processing_pix2struct.py b/src/transformers/models/pix2struct/image_processing_pix2struct.py index 466997c8d823..98674f9ffc22 100644 --- a/src/transformers/models/pix2struct/image_processing_pix2struct.py +++ b/src/transformers/models/pix2struct/image_processing_pix2struct.py @@ -33,7 +33,7 @@ valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging -from ...utils.import_utils import requires_backends +from ...utils.import_utils import export, requires_backends if is_vision_available(): @@ -186,6 +186,7 @@ def render_header( return new_image +@export(backends=("vision",)) class Pix2StructImageProcessor(BaseImageProcessor): r""" Constructs a Pix2Struct image processor. @@ -459,3 +460,6 @@ def preprocess( ) return encoded_outputs + + +__all__ = ["Pix2StructImageProcessor"] diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index 94d882c80566..76b12a49f2ab 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -1781,3 +1781,11 @@ def prepare_inputs_for_generation( "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } + + +__all__ = [ + "Pix2StructPreTrainedModel", + "Pix2StructVisionModel", + "Pix2StructTextModel", + "Pix2StructForConditionalGeneration", +] diff --git a/src/transformers/models/pix2struct/processing_pix2struct.py b/src/transformers/models/pix2struct/processing_pix2struct.py index 269fa8c62fb2..990f57425446 100644 --- a/src/transformers/models/pix2struct/processing_pix2struct.py +++ b/src/transformers/models/pix2struct/processing_pix2struct.py @@ -161,3 +161,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["Pix2StructProcessor"] diff --git a/src/transformers/models/plbart/__init__.py b/src/transformers/models/plbart/__init__.py index cd4c46fad3dd..bb0bd1e97cef 100644 --- a/src/transformers/models/plbart/__init__.py +++ b/src/transformers/models/plbart/__init__.py @@ -13,67 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_plbart": ["PLBartConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_plbart"] = ["PLBartTokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_plbart"] = [ - "PLBartForCausalLM", - "PLBartForConditionalGeneration", - "PLBartForSequenceClassification", - "PLBartModel", - "PLBartPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_plbart import PLBartConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_plbart import PLBartTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_plbart import ( - PLBartForCausalLM, - PLBartForConditionalGeneration, - PLBartForSequenceClassification, - PLBartModel, - PLBartPreTrainedModel, - ) - - + from .configuration_plbart import * + from .modeling_plbart import * + from .tokenization_plbart import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/plbart/configuration_plbart.py b/src/transformers/models/plbart/configuration_plbart.py index 86dbc0cec83c..30871c4b7259 100644 --- a/src/transformers/models/plbart/configuration_plbart.py +++ b/src/transformers/models/plbart/configuration_plbart.py @@ -188,3 +188,6 @@ def outputs(self) -> Mapping[str, Mapping[int, str]]: ("encoder_last_hidden_state", {0: "batch", 1: "sequence"}), ] ) + + +__all__ = ["PLBartConfig"] diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 93d91e160089..554a480f8ad9 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -1780,3 +1780,12 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = [ + "PLBartPreTrainedModel", + "PLBartModel", + "PLBartForConditionalGeneration", + "PLBartForSequenceClassification", + "PLBartForCausalLM", +] diff --git a/src/transformers/models/plbart/tokenization_plbart.py b/src/transformers/models/plbart/tokenization_plbart.py index 9ab2e33f7f0d..139c642ab6c6 100644 --- a/src/transformers/models/plbart/tokenization_plbart.py +++ b/src/transformers/models/plbart/tokenization_plbart.py @@ -21,6 +21,7 @@ from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -46,6 +47,7 @@ } +@export(backends=("sentencepiece",)) class PLBartTokenizer(PreTrainedTokenizer): """ Construct an PLBART tokenizer. 
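The plbart __init__.py above collapses its try/except OptionalDependencyNotAvailable ladders into a single define_import_structure(_file) call, like the folders before it. For that to be safe, the structure has to be computable without actually importing the submodules, since importing modeling_plbart would pull in torch and defeat the laziness. Below is a minimal, self-contained analogue of the static approach, under the assumption that the real helper in utils/import_utils.py reads module source rather than importing it; its actual logic is more involved.

import ast
from pathlib import Path


def read_all(module_path: Path) -> list:
    """Return a module's literal __all__ without importing the module."""
    tree = ast.parse(module_path.read_text(encoding="utf-8"))
    for node in tree.body:
        if isinstance(node, ast.Assign):
            names = [t.id for t in node.targets if isinstance(t, ast.Name)]
            if "__all__" in names:
                return ast.literal_eval(node.value)
    return []

Running read_all on modeling_plbart.py would yield the five PLBart names from the __all__ list added above, with torch never imported.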
@@ -423,3 +425,6 @@ def _convert_lang_code_special_format(self, lang: str) -> str: """Convert Language Codes to format tokenizer uses if required""" lang = FAIRSEQ_LANGUAGE_CODES_MAP[lang] if lang in FAIRSEQ_LANGUAGE_CODES_MAP.keys() else lang return lang + + +__all__ = ["PLBartTokenizer"] diff --git a/src/transformers/models/poolformer/__init__.py b/src/transformers/models/poolformer/__init__.py index 00c345463697..957e0fdc1a10 100644 --- a/src/transformers/models/poolformer/__init__.py +++ b/src/transformers/models/poolformer/__init__.py @@ -13,67 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_poolformer": [ - "PoolFormerConfig", - "PoolFormerOnnxConfig", - ] -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"] - _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_poolformer"] = [ - "PoolFormerForImageClassification", - "PoolFormerModel", - "PoolFormerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_poolformer import ( - PoolFormerConfig, - PoolFormerOnnxConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_poolformer import PoolFormerFeatureExtractor - from .image_processing_poolformer import PoolFormerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_poolformer import ( - PoolFormerForImageClassification, - PoolFormerModel, - PoolFormerPreTrainedModel, - ) - - + from .configuration_poolformer import * + from .feature_extraction_poolformer import * + from .image_processing_poolformer import * + from .modeling_poolformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/poolformer/configuration_poolformer.py b/src/transformers/models/poolformer/configuration_poolformer.py index a7467b380ec3..cdaf13063138 100644 --- a/src/transformers/models/poolformer/configuration_poolformer.py +++ b/src/transformers/models/poolformer/configuration_poolformer.py @@ -143,3 +143,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 2e-3 + + +__all__ = ["PoolFormerConfig", "PoolFormerOnnxConfig"] diff --git a/src/transformers/models/poolformer/feature_extraction_poolformer.py b/src/transformers/models/poolformer/feature_extraction_poolformer.py index 79ffa037eed3..051c6136141d 100644 --- a/src/transformers/models/poolformer/feature_extraction_poolformer.py +++ b/src/transformers/models/poolformer/feature_extraction_poolformer.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export 
from .image_processing_poolformer import PoolFormerImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class PoolFormerFeatureExtractor(PoolFormerImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["PoolFormerFeatureExtractor"] diff --git a/src/transformers/models/poolformer/image_processing_poolformer.py b/src/transformers/models/poolformer/image_processing_poolformer.py index 669617f95973..4c1f110992b1 100644 --- a/src/transformers/models/poolformer/image_processing_poolformer.py +++ b/src/transformers/models/poolformer/image_processing_poolformer.py @@ -38,6 +38,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -47,6 +48,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class PoolFormerImageProcessor(BaseImageProcessor): r""" Constructs a PoolFormer image processor. @@ -355,3 +357,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PoolFormerImageProcessor"] diff --git a/src/transformers/models/poolformer/modeling_poolformer.py b/src/transformers/models/poolformer/modeling_poolformer.py index e70974507b77..c5b0329ca36f 100755 --- a/src/transformers/models/poolformer/modeling_poolformer.py +++ b/src/transformers/models/poolformer/modeling_poolformer.py @@ -443,3 +443,6 @@ def forward( return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + +__all__ = ["PoolFormerPreTrainedModel", "PoolFormerModel", "PoolFormerForImageClassification"] diff --git a/src/transformers/models/pop2piano/__init__.py b/src/transformers/models/pop2piano/__init__.py index cd664cb8a70c..7143affc546b 100644 --- a/src/transformers/models/pop2piano/__init__.py +++ b/src/transformers/models/pop2piano/__init__.py @@ -13,108 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_essentia_available, - is_librosa_available, - is_pretty_midi_available, - is_scipy_available, - is_torch_available, -) - - -_import_structure = { - "configuration_pop2piano": ["Pop2PianoConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pop2piano"] = [ - "Pop2PianoForConditionalGeneration", - "Pop2PianoPreTrainedModel", - ] - -try: - if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_pop2piano"] = ["Pop2PianoFeatureExtractor"] - -try: - if not (is_pretty_midi_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_pop2piano"] = ["Pop2PianoTokenizer"] - -try: - if not ( - is_pretty_midi_available() - and is_torch_available() - and is_librosa_available() - and is_essentia_available() - and is_scipy_available() - ): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["processing_pop2piano"] = ["Pop2PianoProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pop2piano import Pop2PianoConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pop2piano import ( - Pop2PianoForConditionalGeneration, - Pop2PianoPreTrainedModel, - ) - - try: - if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_pop2piano import Pop2PianoFeatureExtractor - - try: - if not (is_pretty_midi_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_pop2piano import Pop2PianoTokenizer - - try: - if not ( - is_pretty_midi_available() - and is_torch_available() - and is_librosa_available() - and is_essentia_available() - and is_scipy_available() - ): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .processing_pop2piano import Pop2PianoProcessor - + from .configuration_pop2piano import * + from .feature_extraction_pop2piano import * + from .modeling_pop2piano import * + from .processing_pop2piano import * + from .tokenization_pop2piano import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pop2piano/configuration_pop2piano.py b/src/transformers/models/pop2piano/configuration_pop2piano.py index 51043dab0c43..484e1a4f933e 100644 --- a/src/transformers/models/pop2piano/configuration_pop2piano.py +++ b/src/transformers/models/pop2piano/configuration_pop2piano.py @@ -122,3 +122,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + 
+__all__ = ["Pop2PianoConfig"] diff --git a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py index 738b932355d1..c18ef6d59815 100644 --- a/src/transformers/models/pop2piano/feature_extraction_pop2piano.py +++ b/src/transformers/models/pop2piano/feature_extraction_pop2piano.py @@ -31,6 +31,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_essentia_available(): @@ -47,6 +48,15 @@ logger = logging.get_logger(__name__) +@export( + backends=( + "essentia", + "librosa", + "pretty_midi", + "scipy", + "torch", + ) +) class Pop2PianoFeatureExtractor(SequenceFeatureExtractor): r""" Constructs a Pop2Piano feature extractor. @@ -448,3 +458,6 @@ def __call__( ) return output + + +__all__ = ["Pop2PianoFeatureExtractor"] diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py index c769cff3c454..b37aafa1a393 100644 --- a/src/transformers/models/pop2piano/modeling_pop2piano.py +++ b/src/transformers/models/pop2piano/modeling_pop2piano.py @@ -1357,3 +1357,6 @@ def _reorder_cache(self, past_key_values, beam_idx): reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past + + +__all__ = ["Pop2PianoPreTrainedModel", "Pop2PianoForConditionalGeneration"] diff --git a/src/transformers/models/pop2piano/processing_pop2piano.py b/src/transformers/models/pop2piano/processing_pop2piano.py index 280e5dc79600..dbdbb7d9bd3b 100644 --- a/src/transformers/models/pop2piano/processing_pop2piano.py +++ b/src/transformers/models/pop2piano/processing_pop2piano.py @@ -23,8 +23,18 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils import BatchEncoding, PaddingStrategy, TruncationStrategy from ...utils import TensorType - - +from ...utils.import_utils import export + + +@export( + backends=( + "essentia", + "librosa", + "pretty_midi", + "scipy", + "torch", + ) +) class Pop2PianoProcessor(ProcessorMixin): r""" Constructs an Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single @@ -137,3 +147,6 @@ def save_pretrained(self, save_directory, **kwargs): def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(*args) + + +__all__ = ["Pop2PianoProcessor"] diff --git a/src/transformers/models/pop2piano/tokenization_pop2piano.py b/src/transformers/models/pop2piano/tokenization_pop2piano.py index 5ad0996c15a4..ee0d702bf8ea 100644 --- a/src/transformers/models/pop2piano/tokenization_pop2piano.py +++ b/src/transformers/models/pop2piano/tokenization_pop2piano.py @@ -23,6 +23,7 @@ from ...feature_extraction_utils import BatchFeature from ...tokenization_utils import AddedToken, BatchEncoding, PaddingStrategy, PreTrainedTokenizer, TruncationStrategy from ...utils import TensorType, is_pretty_midi_available, logging, requires_backends, to_numpy +from ...utils.import_utils import export if is_pretty_midi_available(): @@ -59,6 +60,15 @@ def token_note_to_note(number, current_velocity, default_velocity, note_onsets_r return notes +@export( + backends=( + "essentia", + "librosa", + "pretty_midi", + "scipy", + "torch", + ) +) class Pop2PianoTokenizer(PreTrainedTokenizer): """ Constructs a Pop2Piano tokenizer. This tokenizer does not require training. 
@@ -362,7 +372,7 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = def encode_plus( self, - notes: Union[np.ndarray, List[pretty_midi.Note]], + notes: Union[np.ndarray, List["pretty_midi.Note"]], truncation_strategy: Optional[TruncationStrategy] = None, max_length: Optional[int] = None, **kwargs, @@ -433,7 +443,7 @@ def encode_plus( def batch_encode_plus( self, - notes: Union[np.ndarray, List[pretty_midi.Note]], + notes: Union[np.ndarray, List["pretty_midi.Note"]], truncation_strategy: Optional[TruncationStrategy] = None, max_length: Optional[int] = None, **kwargs, @@ -474,8 +484,8 @@ def __call__( self, notes: Union[ np.ndarray, - List[pretty_midi.Note], - List[List[pretty_midi.Note]], + List["pretty_midi.Note"], + List[List["pretty_midi.Note"]], ], padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, @@ -714,3 +724,6 @@ def batch_decode( return BatchEncoding({"notes": notes_list, "pretty_midi_objects": pretty_midi_objects_list}) return BatchEncoding({"notes": notes_list}) + + +__all__ = ["Pop2PianoTokenizer"] diff --git a/src/transformers/models/prophetnet/__init__.py b/src/transformers/models/prophetnet/__init__.py index 2e1a1ac61014..4eff195d6a96 100644 --- a/src/transformers/models/prophetnet/__init__.py +++ b/src/transformers/models/prophetnet/__init__.py @@ -11,53 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_prophetnet": ["ProphetNetConfig"], - "tokenization_prophetnet": ["ProphetNetTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_prophetnet"] = [ - "ProphetNetDecoder", - "ProphetNetEncoder", - "ProphetNetForCausalLM", - "ProphetNetForConditionalGeneration", - "ProphetNetModel", - "ProphetNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_prophetnet import ProphetNetConfig - from .tokenization_prophetnet import ProphetNetTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_prophetnet import ( - ProphetNetDecoder, - ProphetNetEncoder, - ProphetNetForCausalLM, - ProphetNetForConditionalGeneration, - ProphetNetModel, - ProphetNetPreTrainedModel, - ) - + from .configuration_prophetnet import * + from .modeling_prophetnet import * + from .tokenization_prophetnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/prophetnet/configuration_prophetnet.py b/src/transformers/models/prophetnet/configuration_prophetnet.py index 7a9da32b3cac..1219e1faacd4 100644 --- a/src/transformers/models/prophetnet/configuration_prophetnet.py +++ b/src/transformers/models/prophetnet/configuration_prophetnet.py @@ -175,3 +175,6 @@ def num_hidden_layers(self, value): "This model does not support the setting of 
`num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." ) + + +__all__ = ["ProphetNetConfig"] diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 96fa2e2c12e5..3e6ce9de7046 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -2335,3 +2335,13 @@ def _tie_weights(self): def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) + + +__all__ = [ + "ProphetNetPreTrainedModel", + "ProphetNetEncoder", + "ProphetNetDecoder", + "ProphetNetModel", + "ProphetNetForConditionalGeneration", + "ProphetNetForCausalLM", +] diff --git a/src/transformers/models/prophetnet/tokenization_prophetnet.py b/src/transformers/models/prophetnet/tokenization_prophetnet.py index b253ca709958..043db7e725d7 100644 --- a/src/transformers/models/prophetnet/tokenization_prophetnet.py +++ b/src/transformers/models/prophetnet/tokenization_prophetnet.py @@ -497,3 +497,6 @@ def build_inputs_with_special_tokens( return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep + + +__all__ = ["ProphetNetTokenizer"] diff --git a/src/transformers/models/pvt/__init__.py b/src/transformers/models/pvt/__init__.py index 1ee7092f0c46..502a4757dc90 100644 --- a/src/transformers/models/pvt/__init__.py +++ b/src/transformers/models/pvt/__init__.py @@ -16,63 +16,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_pvt": ["PvtConfig", "PvtOnnxConfig"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pvt"] = ["PvtImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pvt"] = [ - "PvtForImageClassification", - "PvtModel", - "PvtPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pvt import PvtConfig, PvtOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pvt import PvtImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pvt import ( - PvtForImageClassification, - PvtModel, - PvtPreTrainedModel, - ) - + from .configuration_pvt import * + from .image_processing_pvt import * + from .modeling_pvt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pvt/configuration_pvt.py b/src/transformers/models/pvt/configuration_pvt.py index 25348818f090..c97c2703ef23 100644 --- a/src/transformers/models/pvt/configuration_pvt.py +++ b/src/transformers/models/pvt/configuration_pvt.py @@ -157,3 +157,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> 
int: return 12 + + +__all__ = ["PvtConfig", "PvtOnnxConfig"] diff --git a/src/transformers/models/pvt/image_processing_pvt.py b/src/transformers/models/pvt/image_processing_pvt.py index c8edba4dc67b..88a1ddad0cf4 100644 --- a/src/transformers/models/pvt/image_processing_pvt.py +++ b/src/transformers/models/pvt/image_processing_pvt.py @@ -34,11 +34,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class PvtImageProcessor(BaseImageProcessor): r""" Constructs a PVT image processor. @@ -271,3 +273,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["PvtImageProcessor"] diff --git a/src/transformers/models/pvt/modeling_pvt.py b/src/transformers/models/pvt/modeling_pvt.py index 7befa4dad021..bdfacd5f8812 100755 --- a/src/transformers/models/pvt/modeling_pvt.py +++ b/src/transformers/models/pvt/modeling_pvt.py @@ -666,3 +666,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["PvtPreTrainedModel", "PvtModel", "PvtForImageClassification"] diff --git a/src/transformers/models/pvt_v2/__init__.py b/src/transformers/models/pvt_v2/__init__.py index 4825eda16505..eb01dbc5aed7 100644 --- a/src/transformers/models/pvt_v2/__init__.py +++ b/src/transformers/models/pvt_v2/__init__.py @@ -16,49 +16,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_pvt_v2": ["PvtV2Config"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pvt_v2"] = [ - "PvtV2ForImageClassification", - "PvtV2Model", - "PvtV2PreTrainedModel", - "PvtV2Backbone", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_pvt_v2 import PvtV2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pvt_v2 import ( - PvtV2Backbone, - PvtV2ForImageClassification, - PvtV2Model, - PvtV2PreTrainedModel, - ) - + from .configuration_pvt_v2 import * + from .modeling_pvt_v2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/pvt_v2/configuration_pvt_v2.py b/src/transformers/models/pvt_v2/configuration_pvt_v2.py index f6d7de299ba3..9aef0e760f3c 100644 --- a/src/transformers/models/pvt_v2/configuration_pvt_v2.py +++ b/src/transformers/models/pvt_v2/configuration_pvt_v2.py @@ -151,3 +151,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["PvtV2Config"] diff --git a/src/transformers/models/pvt_v2/modeling_pvt_v2.py b/src/transformers/models/pvt_v2/modeling_pvt_v2.py index a2e1e7a67452..b83549fd9e34 100644 --- a/src/transformers/models/pvt_v2/modeling_pvt_v2.py +++ 
b/src/transformers/models/pvt_v2/modeling_pvt_v2.py @@ -698,3 +698,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["PvtV2PreTrainedModel", "PvtV2Model", "PvtV2ForImageClassification", "PvtV2Backbone"] diff --git a/src/transformers/models/qwen2/__init__.py b/src/transformers/models/qwen2/__init__.py index 35df37e91a98..e447a94c54c8 100644 --- a/src/transformers/models/qwen2/__init__.py +++ b/src/transformers/models/qwen2/__init__.py @@ -13,70 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_qwen2": ["Qwen2Config"], - "tokenization_qwen2": ["Qwen2Tokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_qwen2_fast"] = ["Qwen2TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2"] = [ - "Qwen2ForCausalLM", - "Qwen2Model", - "Qwen2PreTrainedModel", - "Qwen2ForSequenceClassification", - "Qwen2ForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2 import Qwen2Config - from .tokenization_qwen2 import Qwen2Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_qwen2_fast import Qwen2TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2 import ( - Qwen2ForCausalLM, - Qwen2ForSequenceClassification, - Qwen2ForTokenClassification, - Qwen2Model, - Qwen2PreTrainedModel, - ) - - + from .configuration_qwen2 import * + from .modeling_qwen2 import * + from .tokenization_qwen2 import * + from .tokenization_qwen2_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2/configuration_qwen2.py b/src/transformers/models/qwen2/configuration_qwen2.py index 3eebf631fe17..63a4c72f240d 100644 --- a/src/transformers/models/qwen2/configuration_qwen2.py +++ b/src/transformers/models/qwen2/configuration_qwen2.py @@ -138,3 +138,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["Qwen2Config"] diff --git a/src/transformers/models/qwen2/modeling_qwen2.py b/src/transformers/models/qwen2/modeling_qwen2.py index 0b6c28350b6a..978947bd2b7c 100644 --- a/src/transformers/models/qwen2/modeling_qwen2.py +++ b/src/transformers/models/qwen2/modeling_qwen2.py @@ -1359,6 +1359,7 @@ def forward( """, QWEN2_START_DOCSTRING, ) + # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2 class Qwen2ForTokenClassification(Qwen2PreTrainedModel): def __init__(self, config): @@ -1435,3 +1436,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + 
"Qwen2PreTrainedModel", + "Qwen2Model", + "Qwen2ForCausalLM", + "Qwen2ForSequenceClassification", + "Qwen2ForTokenClassification", +] diff --git a/src/transformers/models/qwen2/tokenization_qwen2.py b/src/transformers/models/qwen2/tokenization_qwen2.py index be2685430f64..c388789b728f 100644 --- a/src/transformers/models/qwen2/tokenization_qwen2.py +++ b/src/transformers/models/qwen2/tokenization_qwen2.py @@ -337,3 +337,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = def prepare_for_tokenization(self, text, **kwargs): text = unicodedata.normalize("NFC", text) return (text, kwargs) + + +__all__ = ["Qwen2Tokenizer"] diff --git a/src/transformers/models/qwen2/tokenization_qwen2_fast.py b/src/transformers/models/qwen2/tokenization_qwen2_fast.py index fcfc4ab764da..b7312755ef58 100644 --- a/src/transformers/models/qwen2/tokenization_qwen2_fast.py +++ b/src/transformers/models/qwen2/tokenization_qwen2_fast.py @@ -132,3 +132,6 @@ def __init__( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["Qwen2TokenizerFast"] diff --git a/src/transformers/models/qwen2_moe/__init__.py b/src/transformers/models/qwen2_moe/__init__.py index e2b73ba2d1f9..428b5fae22e2 100644 --- a/src/transformers/models/qwen2_moe/__init__.py +++ b/src/transformers/models/qwen2_moe/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_qwen2_moe": ["Qwen2MoeConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_qwen2_moe"] = [ - "Qwen2MoeForCausalLM", - "Qwen2MoeModel", - "Qwen2MoePreTrainedModel", - "Qwen2MoeForSequenceClassification", - "Qwen2MoeForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_qwen2_moe import Qwen2MoeConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_qwen2_moe import ( - Qwen2MoeForCausalLM, - Qwen2MoeForSequenceClassification, - Qwen2MoeForTokenClassification, - Qwen2MoeModel, - Qwen2MoePreTrainedModel, - ) - - + from .configuration_qwen2_moe import * + from .modeling_qwen2_moe import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py index b7aa09efdcd9..d3ea25970c7a 100644 --- a/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/configuration_qwen2_moe.py @@ -175,3 +175,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["Qwen2MoeConfig"] diff --git a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py index 03ac51a0f94f..fde3fa0d0406 100644 --- 
a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py @@ -1632,3 +1632,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Qwen2MoePreTrainedModel", + "Qwen2MoeModel", + "Qwen2MoeForCausalLM", + "Qwen2MoeForSequenceClassification", + "Qwen2MoeForTokenClassification", +] diff --git a/src/transformers/models/rag/__init__.py b/src/transformers/models/rag/__init__.py index b238c6290832..8edc15d4a96f 100644 --- a/src/transformers/models/rag/__init__.py +++ b/src/transformers/models/rag/__init__.py @@ -11,72 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_rag": ["RagConfig"], - "retrieval_rag": ["RagRetriever"], - "tokenization_rag": ["RagTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_rag"] = [ - "RagModel", - "RagPreTrainedModel", - "RagSequenceForGeneration", - "RagTokenForGeneration", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_rag"] = [ - "TFRagModel", - "TFRagPreTrainedModel", - "TFRagSequenceForGeneration", - "TFRagTokenForGeneration", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_rag import RagConfig - from .retrieval_rag import RagRetriever - from .tokenization_rag import RagTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_rag import ( - TFRagModel, - TFRagPreTrainedModel, - TFRagSequenceForGeneration, - TFRagTokenForGeneration, - ) - + from .configuration_rag import * + from .modeling_rag import * + from .modeling_tf_rag import * + from .retrieval_rag import * + from .tokenization_rag import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/rag/configuration_rag.py b/src/transformers/models/rag/configuration_rag.py index 98de7382a456..c76926f21879 100644 --- a/src/transformers/models/rag/configuration_rag.py +++ b/src/transformers/models/rag/configuration_rag.py @@ -181,3 +181,6 @@ def from_question_encoder_generator_configs( [`EncoderDecoderConfig`]: An instance of a configuration object """ return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs) + + +__all__ = ["RagConfig"] diff --git a/src/transformers/models/rag/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py index bc375b68e947..c18663d539bc 100644 --- 
a/src/transformers/models/rag/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -1639,3 +1639,6 @@ def _mask_pads(ll, smooth_obj): eps_i = epsilon / rag_logprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss + + +__all__ = ["RagPreTrainedModel", "RagModel", "RagSequenceForGeneration", "RagTokenForGeneration"] diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index 1f243665ea0d..0f4819299078 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -1768,3 +1768,6 @@ def build(self, input_shape=None): if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) + + +__all__ = ["TFRagPreTrainedModel", "TFRagModel", "TFRagTokenForGeneration", "TFRagSequenceForGeneration"] diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index b9ae49b5e9c1..f4000aa6e7f6 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -672,3 +672,6 @@ def __call__( }, tensor_type=return_tensors, ) + + +__all__ = ["RagRetriever"] diff --git a/src/transformers/models/rag/tokenization_rag.py b/src/transformers/models/rag/tokenization_rag.py index 5bc87a895d78..4d0a994e766f 100644 --- a/src/transformers/models/rag/tokenization_rag.py +++ b/src/transformers/models/rag/tokenization_rag.py @@ -119,3 +119,6 @@ def prepare_seq2seq_batch( ) model_inputs["labels"] = labels["input_ids"] return model_inputs + + +__all__ = ["RagTokenizer"] diff --git a/src/transformers/models/recurrent_gemma/__init__.py b/src/transformers/models/recurrent_gemma/__init__.py index 3ac7ff1c99b0..b817528e92e6 100644 --- a/src/transformers/models/recurrent_gemma/__init__.py +++ b/src/transformers/models/recurrent_gemma/__init__.py @@ -13,47 +13,15 @@ # limitations under the License. 
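Every rewritten `__init__.py` in this patch ends with the same tail: `define_import_structure(_file)` computes the lazy-import mapping from the submodules themselves, and `_LazyModule` keeps the old deferred-import behaviour. From the caller's side nothing changes; submodules are still only imported on first attribute access, e.g.:

import transformers.models.rag as rag  # cheap: no heavy submodule is imported yet

retriever_cls = rag.RagRetriever  # first access triggers the import of retrieval_rag
config_cls = rag.RagConfig        # configuration_rag is imported here; torch is still untouched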
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_recurrent_gemma": ["RecurrentGemmaConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_recurrent_gemma"] = [ - "RecurrentGemmaForCausalLM", - "RecurrentGemmaModel", - "RecurrentGemmaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_recurrent_gemma import RecurrentGemmaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_recurrent_gemma import ( - RecurrentGemmaForCausalLM, - RecurrentGemmaModel, - RecurrentGemmaPreTrainedModel, - ) - + from .configuration_recurrent_gemma import * + from .modeling_recurrent_gemma import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py b/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py index 7f45a41710cf..60a034f57d3d 100644 --- a/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +++ b/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py @@ -156,3 +156,6 @@ def __init__( @property def layers_block_type(self): return (self.block_types * 100)[: self.num_hidden_layers] + + +__all__ = ["RecurrentGemmaConfig"] diff --git a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py index a8f076fad79c..3fba36ec75f2 100644 --- a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +++ b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py @@ -941,3 +941,6 @@ def _reorder_cache(self, past_key_values, beam_idx): k_state = k_state.index_select(0, beam_idx.to(k_state.device)) v_state = v_state.index_select(0, beam_idx.to(v_state.device)) return None + + +__all__ = ["RecurrentGemmaPreTrainedModel", "RecurrentGemmaModel", "RecurrentGemmaForCausalLM"] diff --git a/src/transformers/models/reformer/__init__.py b/src/transformers/models/reformer/__init__.py index ef13dd7c312d..24730c779d27 100644 --- a/src/transformers/models/reformer/__init__.py +++ b/src/transformers/models/reformer/__init__.py @@ -11,91 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
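The `from .modeling_recurrent_gemma import *` style used under `TYPE_CHECKING` is only safe because each refactored submodule now declares an explicit `__all__`: a star-import binds exactly the names listed there, so type checkers see the same public surface the lazy module exposes at runtime. Plain Python semantics, in a self-contained sketch:

import sys
import types

# Build a throwaway module with an explicit __all__, mirroring the
# refactored submodules in this patch.
mini = types.ModuleType("mini")
exec("__all__ = ['Public']\nclass Public: ...\nclass _Private: ...", mini.__dict__)
sys.modules["mini"] = mini

ns = {}
exec("from mini import *", ns)
assert "Public" in ns        # exported via __all__
assert "_Private" not in ns  # hidden, despite being importable by name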
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_reformer": ["ReformerConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_reformer"] = ["ReformerTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_reformer"] = [ - "ReformerAttention", - "ReformerForMaskedLM", - "ReformerForQuestionAnswering", - "ReformerForSequenceClassification", - "ReformerLayer", - "ReformerModel", - "ReformerModelWithLMHead", - "ReformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_reformer import ReformerConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_reformer import ReformerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_reformer_fast import ReformerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_reformer import ( - ReformerAttention, - ReformerForMaskedLM, - ReformerForQuestionAnswering, - ReformerForSequenceClassification, - ReformerLayer, - ReformerModel, - ReformerModelWithLMHead, - ReformerPreTrainedModel, - ) - + from .configuration_reformer import * + from .modeling_reformer import * + from .tokenization_reformer import * + from .tokenization_reformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/reformer/configuration_reformer.py b/src/transformers/models/reformer/configuration_reformer.py index 018831010b01..ab9b1c5f64cc 100755 --- a/src/transformers/models/reformer/configuration_reformer.py +++ b/src/transformers/models/reformer/configuration_reformer.py @@ -230,3 +230,6 @@ def __init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) + + +__all__ = ["ReformerConfig"] diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index 2e98a07217e6..146fb8643165 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -2680,3 +2680,13 @@ def forward( hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) + + +__all__ = [ + "ReformerPreTrainedModel", + "ReformerModel", + "ReformerModelWithLMHead", + "ReformerForMaskedLM", + "ReformerForSequenceClassification", + "ReformerForQuestionAnswering", +] diff --git 
a/src/transformers/models/reformer/tokenization_reformer.py b/src/transformers/models/reformer/tokenization_reformer.py index eb4574933673..a141fd350992 100644 --- a/src/transformers/models/reformer/tokenization_reformer.py +++ b/src/transformers/models/reformer/tokenization_reformer.py @@ -22,6 +22,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -32,6 +33,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} +@export(backends=("sentencepiece",)) class ReformerTokenizer(PreTrainedTokenizer): """ Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece) . @@ -169,3 +171,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["ReformerTokenizer"] diff --git a/src/transformers/models/reformer/tokenization_reformer_fast.py b/src/transformers/models/reformer/tokenization_reformer_fast.py index 26f007a7f71b..a48441c55e5a 100644 --- a/src/transformers/models/reformer/tokenization_reformer_fast.py +++ b/src/transformers/models/reformer/tokenization_reformer_fast.py @@ -113,3 +113,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["ReformerTokenizerFast"] diff --git a/src/transformers/models/regnet/__init__.py b/src/transformers/models/regnet/__init__.py index 25507927affd..620c6f50fba5 100644 --- a/src/transformers/models/regnet/__init__.py +++ b/src/transformers/models/regnet/__init__.py @@ -13,95 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = {"configuration_regnet": ["RegNetConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_regnet"] = [ - "RegNetForImageClassification", - "RegNetModel", - "RegNetPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_regnet"] = [ - "TFRegNetForImageClassification", - "TFRegNetModel", - "TFRegNetPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_regnet"] = [ - "FlaxRegNetForImageClassification", - "FlaxRegNetModel", - "FlaxRegNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_regnet import RegNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_regnet import ( - RegNetForImageClassification, - RegNetModel, - RegNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_regnet import ( - TFRegNetForImageClassification, - TFRegNetModel, - TFRegNetPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: 
- pass - else: - from .modeling_flax_regnet import ( - FlaxRegNetForImageClassification, - FlaxRegNetModel, - FlaxRegNetPreTrainedModel, - ) - - + from .configuration_regnet import * + from .modeling_flax_regnet import * + from .modeling_regnet import * + from .modeling_tf_regnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/regnet/configuration_regnet.py b/src/transformers/models/regnet/configuration_regnet.py index 34f90ce1841f..c089f67297aa 100644 --- a/src/transformers/models/regnet/configuration_regnet.py +++ b/src/transformers/models/regnet/configuration_regnet.py @@ -89,3 +89,6 @@ def __init__( self.hidden_act = hidden_act # always downsample in the first stage self.downsample_in_first_stage = True + + +__all__ = ["RegNetConfig"] diff --git a/src/transformers/models/regnet/modeling_flax_regnet.py b/src/transformers/models/regnet/modeling_flax_regnet.py index fc4258257bdb..06ac24ec28de 100644 --- a/src/transformers/models/regnet/modeling_flax_regnet.py +++ b/src/transformers/models/regnet/modeling_flax_regnet.py @@ -817,3 +817,5 @@ class FlaxRegNetForImageClassification(FlaxRegNetPreTrainedModel): output_type=FlaxImageClassifierOutputWithNoAttention, config_class=RegNetConfig, ) + +__all__ = ["FlaxRegNetPreTrainedModel", "FlaxRegNetModel", "FlaxRegNetForImageClassification"] diff --git a/src/transformers/models/regnet/modeling_regnet.py b/src/transformers/models/regnet/modeling_regnet.py index 9420fb5edad5..4f5a3c2dfc3d 100644 --- a/src/transformers/models/regnet/modeling_regnet.py +++ b/src/transformers/models/regnet/modeling_regnet.py @@ -449,3 +449,6 @@ def forward( return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + +__all__ = ["RegNetPreTrainedModel", "RegNetModel", "RegNetForImageClassification"] diff --git a/src/transformers/models/regnet/modeling_tf_regnet.py b/src/transformers/models/regnet/modeling_tf_regnet.py index 3d6b38b9e4c0..d1b569423280 100644 --- a/src/transformers/models/regnet/modeling_tf_regnet.py +++ b/src/transformers/models/regnet/modeling_tf_regnet.py @@ -606,3 +606,6 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier[1].name): self.classifier[1].build([None, None, None, self.config.hidden_sizes[-1]]) + + +__all__ = ["TFRegNetPreTrainedModel", "TFRegNetModel", "TFRegNetForImageClassification", "TFRegNetMainLayer"] diff --git a/src/transformers/models/rembert/__init__.py b/src/transformers/models/rembert/__init__.py index 5ffaf3c8c04c..6d45cf47fb25 100644 --- a/src/transformers/models/rembert/__init__.py +++ b/src/transformers/models/rembert/__init__.py @@ -11,134 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
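The `@export(backends=("sentencepiece",))` marker on `ReformerTokenizer` above (and on `RemBertTokenizer` below) is the per-symbol replacement for the old `is_sentencepiece_available()` guard: `define_import_structure` reads it to decide which backend bucket a name belongs to. The real decorator lives in `src/transformers/utils/import_utils.py` and is not shown in these hunks; as a rough sketch of the registration idea only (the attribute name here is hypothetical):

from typing import Callable, Optional, Tuple, TypeVar

T = TypeVar("T")

def export(backends: Optional[Tuple[str, ...]] = None) -> Callable[[T], T]:
    """Sketch: tag an object with the backends it needs so an
    import-structure builder can group it under the right guard."""

    def inner(obj: T) -> T:
        obj.required_backends = backends or ()  # hypothetical attribute
        return obj

    return inner

@export(backends=("sentencepiece",))
class DemoTokenizer:
    pass

assert DemoTokenizer.required_backends == ("sentencepiece",)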
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_rembert": ["RemBertConfig", "RemBertOnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_rembert"] = ["RemBertTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_rembert"] = [ - "RemBertForCausalLM", - "RemBertForMaskedLM", - "RemBertForMultipleChoice", - "RemBertForQuestionAnswering", - "RemBertForSequenceClassification", - "RemBertForTokenClassification", - "RemBertLayer", - "RemBertModel", - "RemBertPreTrainedModel", - "load_tf_weights_in_rembert", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_rembert"] = [ - "TFRemBertForCausalLM", - "TFRemBertForMaskedLM", - "TFRemBertForMultipleChoice", - "TFRemBertForQuestionAnswering", - "TFRemBertForSequenceClassification", - "TFRemBertForTokenClassification", - "TFRemBertLayer", - "TFRemBertModel", - "TFRemBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_rembert import RemBertConfig, RemBertOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_rembert import RemBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_rembert_fast import RemBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_rembert import ( - RemBertForCausalLM, - RemBertForMaskedLM, - RemBertForMultipleChoice, - RemBertForQuestionAnswering, - RemBertForSequenceClassification, - RemBertForTokenClassification, - RemBertLayer, - RemBertModel, - RemBertPreTrainedModel, - load_tf_weights_in_rembert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_rembert import ( - TFRemBertForCausalLM, - TFRemBertForMaskedLM, - TFRemBertForMultipleChoice, - TFRemBertForQuestionAnswering, - TFRemBertForSequenceClassification, - TFRemBertForTokenClassification, - TFRemBertLayer, - TFRemBertModel, - TFRemBertPreTrainedModel, - ) - - + from .configuration_rembert import * + from .modeling_rembert import * + from .modeling_tf_rembert import * + from .tokenization_rembert import * + from .tokenization_rembert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/rembert/configuration_rembert.py b/src/transformers/models/rembert/configuration_rembert.py index f9d28303fdca..b4b1fbc325c2 100644 --- a/src/transformers/models/rembert/configuration_rembert.py +++ b/src/transformers/models/rembert/configuration_rembert.py @@ -157,3 +157,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["RemBertConfig", "RemBertOnnxConfig"] diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index 31f7e3dce454..0402c1dcada9 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -1520,3 +1520,16 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_rembert", + "RemBertPreTrainedModel", + "RemBertModel", + "RemBertForMaskedLM", + "RemBertForCausalLM", + "RemBertForSequenceClassification", + "RemBertForMultipleChoice", + "RemBertForTokenClassification", + "RemBertForQuestionAnswering", +] diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 5ee9ba1364d9..1ff2e7844087 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -1706,3 +1706,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRemBertPreTrainedModel", + "TFRemBertModel", + "TFRemBertForMaskedLM", + "TFRemBertForCausalLM", + "TFRemBertForSequenceClassification", + "TFRemBertForMultipleChoice", + "TFRemBertForTokenClassification", + "TFRemBertForQuestionAnswering", + "TFRemBertMainLayer", +] diff --git a/src/transformers/models/rembert/tokenization_rembert.py b/src/transformers/models/rembert/tokenization_rembert.py index 0c046b9bca1d..225fc7ba44c8 100644 --- a/src/transformers/models/rembert/tokenization_rembert.py +++ b/src/transformers/models/rembert/tokenization_rembert.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -29,6 +30,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"} +@export(backends=("sentencepiece",)) class RemBertTokenizer(PreTrainedTokenizer): """ Construct a RemBERT tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). 
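With `__all__` naming each module's public symbols and `@export` naming their backends, `define_import_structure(_file)` has everything the hand-written `_import_structure` dicts used to encode. Its actual parser lives in `import_utils.py` and handles many more cases; the core idea, sketched under the assumption that it scans the sibling modules' source:

import ast
from pathlib import Path

def sketch_import_structure(init_file: str) -> dict:
    """Sketch only: map each sibling module to the names in its __all__.
    (The real define_import_structure also folds in @export backends.)"""
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text())
        for node in tree.body:
            is_all = isinstance(node, ast.Assign) and any(
                isinstance(target, ast.Name) and target.id == "__all__"
                for target in node.targets
            )
            if is_all and isinstance(node.value, (ast.List, ast.Tuple)):
                structure[path.stem] = [ast.literal_eval(el) for el in node.value.elts]
    return structure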
@@ -260,3 +262,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["RemBertTokenizer"] diff --git a/src/transformers/models/rembert/tokenization_rembert_fast.py b/src/transformers/models/rembert/tokenization_rembert_fast.py index 350e02e33bf4..7eed4f80a787 100644 --- a/src/transformers/models/rembert/tokenization_rembert_fast.py +++ b/src/transformers/models/rembert/tokenization_rembert_fast.py @@ -227,3 +227,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["RemBertTokenizerFast"] diff --git a/src/transformers/models/resnet/__init__.py b/src/transformers/models/resnet/__init__.py index 50b71a4dd4cf..19debe333859 100644 --- a/src/transformers/models/resnet/__init__.py +++ b/src/transformers/models/resnet/__init__.py @@ -13,92 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_resnet": ["ResNetConfig", "ResNetOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_resnet"] = [ - "ResNetForImageClassification", - "ResNetModel", - "ResNetPreTrainedModel", - "ResNetBackbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_resnet"] = [ - "TFResNetForImageClassification", - "TFResNetModel", - "TFResNetPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_resnet"] = [ - "FlaxResNetForImageClassification", - "FlaxResNetModel", - "FlaxResNetPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_resnet import ResNetConfig, ResNetOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_resnet import ( - ResNetBackbone, - ResNetForImageClassification, - ResNetModel, - ResNetPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_resnet import ( - TFResNetForImageClassification, - TFResNetModel, - TFResNetPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel - - + from .configuration_resnet import * + from .modeling_flax_resnet import * + from .modeling_resnet import * + from .modeling_tf_resnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 
92fe65628749..42bc19a8bca5 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -131,3 +131,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-3 + + +__all__ = ["ResNetConfig", "ResNetOnnxConfig"] diff --git a/src/transformers/models/resnet/modeling_flax_resnet.py b/src/transformers/models/resnet/modeling_flax_resnet.py index 07c07e95115b..4094cbe8b399 100644 --- a/src/transformers/models/resnet/modeling_flax_resnet.py +++ b/src/transformers/models/resnet/modeling_flax_resnet.py @@ -699,3 +699,5 @@ class FlaxResNetForImageClassification(FlaxResNetPreTrainedModel): append_replace_return_docstrings( FlaxResNetForImageClassification, output_type=FlaxImageClassifierOutputWithNoAttention, config_class=ResNetConfig ) + +__all__ = ["FlaxResNetPreTrainedModel", "FlaxResNetModel", "FlaxResNetForImageClassification"] diff --git a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index ccd4fac17582..e22af9e60575 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -515,3 +515,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) + + +__all__ = ["ResNetPreTrainedModel", "ResNetModel", "ResNetForImageClassification", "ResNetBackbone"] diff --git a/src/transformers/models/resnet/modeling_tf_resnet.py b/src/transformers/models/resnet/modeling_tf_resnet.py index 1e2ec143cda0..569d1af703e5 100644 --- a/src/transformers/models/resnet/modeling_tf_resnet.py +++ b/src/transformers/models/resnet/modeling_tf_resnet.py @@ -591,3 +591,6 @@ def build(self, input_shape=None): if getattr(self, "classifier_layer", None) is not None: with tf.name_scope(self.classifier_layer.name): self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]]) + + +__all__ = ["TFResNetPreTrainedModel", "TFResNetModel", "TFResNetForImageClassification", "TFResNetMainLayer"] diff --git a/src/transformers/models/roberta/__init__.py b/src/transformers/models/roberta/__init__.py index 4a97962f4f57..cfee80a173f0 100644 --- a/src/transformers/models/roberta/__init__.py +++ b/src/transformers/models/roberta/__init__.py @@ -11,150 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
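For downstream code the gating is now invisible at the package boundary, but the availability helpers the old `__init__.py` files imported remain public, so callers can keep the same defensive pattern, e.g. around the tokenizers-gated fast tokenizer for RoBERTa below:

from transformers.utils import is_tokenizers_available

if is_tokenizers_available():
    from transformers import RobertaTokenizerFast

    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
else:
    from transformers import RobertaTokenizer  # slow tokenizer needs no extra backend

    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")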
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roberta": ["RobertaConfig", "RobertaOnnxConfig"], - "tokenization_roberta": ["RobertaTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roberta"] = [ - "RobertaForCausalLM", - "RobertaForMaskedLM", - "RobertaForMultipleChoice", - "RobertaForQuestionAnswering", - "RobertaForSequenceClassification", - "RobertaForTokenClassification", - "RobertaModel", - "RobertaPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roberta"] = [ - "TFRobertaForCausalLM", - "TFRobertaForMaskedLM", - "TFRobertaForMultipleChoice", - "TFRobertaForQuestionAnswering", - "TFRobertaForSequenceClassification", - "TFRobertaForTokenClassification", - "TFRobertaMainLayer", - "TFRobertaModel", - "TFRobertaPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roberta"] = [ - "FlaxRobertaForCausalLM", - "FlaxRobertaForMaskedLM", - "FlaxRobertaForMultipleChoice", - "FlaxRobertaForQuestionAnswering", - "FlaxRobertaForSequenceClassification", - "FlaxRobertaForTokenClassification", - "FlaxRobertaModel", - "FlaxRobertaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roberta import RobertaConfig, RobertaOnnxConfig - from .tokenization_roberta import RobertaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_roberta_fast import RobertaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roberta import ( - RobertaForCausalLM, - RobertaForMaskedLM, - RobertaForMultipleChoice, - RobertaForQuestionAnswering, - RobertaForSequenceClassification, - RobertaForTokenClassification, - RobertaModel, - RobertaPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roberta import ( - TFRobertaForCausalLM, - TFRobertaForMaskedLM, - TFRobertaForMultipleChoice, - TFRobertaForQuestionAnswering, - TFRobertaForSequenceClassification, - TFRobertaForTokenClassification, - TFRobertaMainLayer, - TFRobertaModel, - TFRobertaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roberta import ( - FlaxRobertaForCausalLM, - FlaxRobertaForMaskedLM, - FlaxRobertaForMultipleChoice, - FlaxRobertaForQuestionAnswering, - FlaxRobertaForSequenceClassification, - FlaxRobertaForTokenClassification, - FlaxRobertaModel, - FlaxRobertaPreTrainedModel, - ) - + 
from .configuration_roberta import * + from .modeling_flax_roberta import * + from .modeling_roberta import * + from .modeling_tf_roberta import * + from .tokenization_roberta import * + from .tokenization_roberta_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roberta/configuration_roberta.py b/src/transformers/models/roberta/configuration_roberta.py index d08f3df47718..35ff80115f1d 100644 --- a/src/transformers/models/roberta/configuration_roberta.py +++ b/src/transformers/models/roberta/configuration_roberta.py @@ -150,3 +150,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["RobertaConfig", "RobertaOnnxConfig"] diff --git a/src/transformers/models/roberta/modeling_flax_roberta.py b/src/transformers/models/roberta/modeling_flax_roberta.py index ecdd31386b21..d5245271f9a8 100644 --- a/src/transformers/models/roberta/modeling_flax_roberta.py +++ b/src/transformers/models/roberta/modeling_flax_roberta.py @@ -1486,3 +1486,14 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxRobertaPreTrainedModel", + "FlaxRobertaModel", + "FlaxRobertaForMaskedLM", + "FlaxRobertaForSequenceClassification", + "FlaxRobertaForMultipleChoice", + "FlaxRobertaForTokenClassification", + "FlaxRobertaForQuestionAnswering", + "FlaxRobertaForCausalLM", +] diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index f1f83147527d..9fab3a807685 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -1700,3 +1700,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "RobertaPreTrainedModel", + "RobertaModel", + "RobertaForCausalLM", + "RobertaForMaskedLM", + "RobertaForSequenceClassification", + "RobertaForMultipleChoice", + "RobertaForTokenClassification", + "RobertaForQuestionAnswering", +] diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index 439d12a87026..62ee4f86744e 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -1768,3 +1768,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRobertaPreTrainedModel", + "TFRobertaModel", + "TFRobertaForMaskedLM", + "TFRobertaForCausalLM", + "TFRobertaForSequenceClassification", + "TFRobertaForMultipleChoice", + "TFRobertaForTokenClassification", + "TFRobertaForQuestionAnswering", + "TFRobertaMainLayer", +] diff --git a/src/transformers/models/roberta/tokenization_roberta.py b/src/transformers/models/roberta/tokenization_roberta.py index 072c44ac4dd3..6ec6cbbf9866 100644 --- a/src/transformers/models/roberta/tokenization_roberta.py +++ 
b/src/transformers/models/roberta/tokenization_roberta.py @@ -397,3 +397,6 @@ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) + + +__all__ = ["RobertaTokenizer"] diff --git a/src/transformers/models/roberta/tokenization_roberta_fast.py b/src/transformers/models/roberta/tokenization_roberta_fast.py index 8384397033ce..336148f41380 100644 --- a/src/transformers/models/roberta/tokenization_roberta_fast.py +++ b/src/transformers/models/roberta/tokenization_roberta_fast.py @@ -267,3 +267,6 @@ def create_token_type_ids_from_sequences( if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + +__all__ = ["RobertaTokenizerFast"] diff --git a/src/transformers/models/roberta_prelayernorm/__init__.py b/src/transformers/models/roberta_prelayernorm/__init__.py index 9f55eed11c42..0346e9e2a4c6 100644 --- a/src/transformers/models/roberta_prelayernorm/__init__.py +++ b/src/transformers/models/roberta_prelayernorm/__init__.py @@ -11,137 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roberta_prelayernorm": [ - "RobertaPreLayerNormConfig", - "RobertaPreLayerNormOnnxConfig", - ], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roberta_prelayernorm"] = [ - "RobertaPreLayerNormForCausalLM", - "RobertaPreLayerNormForMaskedLM", - "RobertaPreLayerNormForMultipleChoice", - "RobertaPreLayerNormForQuestionAnswering", - "RobertaPreLayerNormForSequenceClassification", - "RobertaPreLayerNormForTokenClassification", - "RobertaPreLayerNormModel", - "RobertaPreLayerNormPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roberta_prelayernorm"] = [ - "TFRobertaPreLayerNormForCausalLM", - "TFRobertaPreLayerNormForMaskedLM", - "TFRobertaPreLayerNormForMultipleChoice", - "TFRobertaPreLayerNormForQuestionAnswering", - "TFRobertaPreLayerNormForSequenceClassification", - "TFRobertaPreLayerNormForTokenClassification", - "TFRobertaPreLayerNormMainLayer", - "TFRobertaPreLayerNormModel", - "TFRobertaPreLayerNormPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roberta_prelayernorm"] = [ - "FlaxRobertaPreLayerNormForCausalLM", - "FlaxRobertaPreLayerNormForMaskedLM", - "FlaxRobertaPreLayerNormForMultipleChoice", - "FlaxRobertaPreLayerNormForQuestionAnswering", - "FlaxRobertaPreLayerNormForSequenceClassification", - "FlaxRobertaPreLayerNormForTokenClassification", - "FlaxRobertaPreLayerNormModel", - "FlaxRobertaPreLayerNormPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roberta_prelayernorm import ( - RobertaPreLayerNormConfig, - 
RobertaPreLayerNormOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roberta_prelayernorm import ( - RobertaPreLayerNormForCausalLM, - RobertaPreLayerNormForMaskedLM, - RobertaPreLayerNormForMultipleChoice, - RobertaPreLayerNormForQuestionAnswering, - RobertaPreLayerNormForSequenceClassification, - RobertaPreLayerNormForTokenClassification, - RobertaPreLayerNormModel, - RobertaPreLayerNormPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roberta_prelayernorm import ( - TFRobertaPreLayerNormForCausalLM, - TFRobertaPreLayerNormForMaskedLM, - TFRobertaPreLayerNormForMultipleChoice, - TFRobertaPreLayerNormForQuestionAnswering, - TFRobertaPreLayerNormForSequenceClassification, - TFRobertaPreLayerNormForTokenClassification, - TFRobertaPreLayerNormMainLayer, - TFRobertaPreLayerNormModel, - TFRobertaPreLayerNormPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roberta_prelayernorm import ( - FlaxRobertaPreLayerNormForCausalLM, - FlaxRobertaPreLayerNormForMaskedLM, - FlaxRobertaPreLayerNormForMultipleChoice, - FlaxRobertaPreLayerNormForQuestionAnswering, - FlaxRobertaPreLayerNormForSequenceClassification, - FlaxRobertaPreLayerNormForTokenClassification, - FlaxRobertaPreLayerNormModel, - FlaxRobertaPreLayerNormPreTrainedModel, - ) - + from .configuration_roberta_prelayernorm import * + from .modeling_flax_roberta_prelayernorm import * + from .modeling_roberta_prelayernorm import * + from .modeling_tf_roberta_prelayernorm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py index e0c939f6575c..71ecbd4474d4 100644 --- a/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py @@ -152,3 +152,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig"] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py index c50227eaa296..a3e94bcd481f 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py @@ -1513,3 +1513,14 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxRobertaPreLayerNormPreTrainedModel", + "FlaxRobertaPreLayerNormModel", + "FlaxRobertaPreLayerNormForMaskedLM", + "FlaxRobertaPreLayerNormForSequenceClassification", + "FlaxRobertaPreLayerNormForMultipleChoice", + "FlaxRobertaPreLayerNormForTokenClassification", + 
"FlaxRobertaPreLayerNormForQuestionAnswering", + "FlaxRobertaPreLayerNormForCausalLM", +] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py index 95657c260dc7..41e8379617fb 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py @@ -1560,3 +1560,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "RobertaPreLayerNormPreTrainedModel", + "RobertaPreLayerNormModel", + "RobertaPreLayerNormForCausalLM", + "RobertaPreLayerNormForMaskedLM", + "RobertaPreLayerNormForSequenceClassification", + "RobertaPreLayerNormForMultipleChoice", + "RobertaPreLayerNormForTokenClassification", + "RobertaPreLayerNormForQuestionAnswering", +] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py index 1ecd376901fe..0d7a253a29be 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py @@ -1793,3 +1793,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFRobertaPreLayerNormPreTrainedModel", + "TFRobertaPreLayerNormModel", + "TFRobertaPreLayerNormForMaskedLM", + "TFRobertaPreLayerNormForCausalLM", + "TFRobertaPreLayerNormForMultipleChoice", + "TFRobertaPreLayerNormForTokenClassification", + "TFRobertaPreLayerNormForQuestionAnswering", + "TFRobertaPreLayerNormForSequenceClassification", + "TFRobertaPreLayerNormMainLayer", +] diff --git a/src/transformers/models/roc_bert/__init__.py b/src/transformers/models/roc_bert/__init__.py index 9971c53975d4..cd53a2414e40 100644 --- a/src/transformers/models/roc_bert/__init__.py +++ b/src/transformers/models/roc_bert/__init__.py @@ -13,76 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_roc_bert": ["RoCBertConfig"], - "tokenization_roc_bert": ["RoCBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - pass - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roc_bert"] = [ - "RoCBertForCausalLM", - "RoCBertForMaskedLM", - "RoCBertForMultipleChoice", - "RoCBertForPreTraining", - "RoCBertForQuestionAnswering", - "RoCBertForSequenceClassification", - "RoCBertForTokenClassification", - "RoCBertLayer", - "RoCBertModel", - "RoCBertPreTrainedModel", - "load_tf_weights_in_roc_bert", - ] - if TYPE_CHECKING: - from .configuration_roc_bert import RoCBertConfig - from .tokenization_roc_bert import RoCBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - raise OptionalDependencyNotAvailable() - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roc_bert import ( - RoCBertForCausalLM, - RoCBertForMaskedLM, - RoCBertForMultipleChoice, - RoCBertForPreTraining, - RoCBertForQuestionAnswering, - RoCBertForSequenceClassification, - RoCBertForTokenClassification, - RoCBertLayer, - RoCBertModel, - RoCBertPreTrainedModel, - load_tf_weights_in_roc_bert, - ) - - + from .configuration_roc_bert import * + from .modeling_roc_bert import * + from .tokenization_roc_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roc_bert/configuration_roc_bert.py b/src/transformers/models/roc_bert/configuration_roc_bert.py index d402349e67b5..4bf53bf33842 100644 --- a/src/transformers/models/roc_bert/configuration_roc_bert.py +++ b/src/transformers/models/roc_bert/configuration_roc_bert.py @@ -158,3 +158,6 @@ def __init__( self.position_embedding_type = position_embedding_type self.classifier_dropout = classifier_dropout super().__init__(pad_token_id=pad_token_id, **kwargs) + + +__all__ = ["RoCBertConfig"] diff --git a/src/transformers/models/roc_bert/modeling_roc_bert.py b/src/transformers/models/roc_bert/modeling_roc_bert.py index c4efbf16323e..607c19ae5e89 100644 --- a/src/transformers/models/roc_bert/modeling_roc_bert.py +++ b/src/transformers/models/roc_bert/modeling_roc_bert.py @@ -1993,3 +1993,17 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_roc_bert", + "RoCBertPreTrainedModel", + "RoCBertModel", + "RoCBertForPreTraining", + "RoCBertForMaskedLM", + "RoCBertForCausalLM", + "RoCBertForSequenceClassification", + "RoCBertForMultipleChoice", + "RoCBertForTokenClassification", + "RoCBertForQuestionAnswering", +] diff --git a/src/transformers/models/roc_bert/tokenization_roc_bert.py b/src/transformers/models/roc_bert/tokenization_roc_bert.py index eaf2a1a49133..a99e1769fbba 100644 --- 
a/src/transformers/models/roc_bert/tokenization_roc_bert.py +++ b/src/transformers/models/roc_bert/tokenization_roc_bert.py @@ -1106,3 +1106,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["RoCBertTokenizer"] diff --git a/src/transformers/models/roformer/__init__.py b/src/transformers/models/roformer/__init__.py index d9642eba59fe..493c5f90e1c9 100644 --- a/src/transformers/models/roformer/__init__.py +++ b/src/transformers/models/roformer/__init__.py @@ -13,152 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_roformer": ["RoFormerConfig", "RoFormerOnnxConfig"], - "tokenization_roformer": ["RoFormerTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_roformer"] = [ - "RoFormerForCausalLM", - "RoFormerForMaskedLM", - "RoFormerForMultipleChoice", - "RoFormerForQuestionAnswering", - "RoFormerForSequenceClassification", - "RoFormerForTokenClassification", - "RoFormerLayer", - "RoFormerModel", - "RoFormerPreTrainedModel", - "load_tf_weights_in_roformer", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_roformer"] = [ - "TFRoFormerForCausalLM", - "TFRoFormerForMaskedLM", - "TFRoFormerForMultipleChoice", - "TFRoFormerForQuestionAnswering", - "TFRoFormerForSequenceClassification", - "TFRoFormerForTokenClassification", - "TFRoFormerLayer", - "TFRoFormerModel", - "TFRoFormerPreTrainedModel", - ] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_roformer"] = [ - "FlaxRoFormerForMaskedLM", - "FlaxRoFormerForMultipleChoice", - "FlaxRoFormerForQuestionAnswering", - "FlaxRoFormerForSequenceClassification", - "FlaxRoFormerForTokenClassification", - "FlaxRoFormerModel", - "FlaxRoFormerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_roformer import RoFormerConfig, RoFormerOnnxConfig - from .tokenization_roformer import RoFormerTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_roformer_fast import RoFormerTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_roformer import ( - RoFormerForCausalLM, - RoFormerForMaskedLM, - RoFormerForMultipleChoice, - RoFormerForQuestionAnswering, - RoFormerForSequenceClassification, - RoFormerForTokenClassification, - RoFormerLayer, - RoFormerModel, - RoFormerPreTrainedModel, - load_tf_weights_in_roformer, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_roformer 
import ( - TFRoFormerForCausalLM, - TFRoFormerForMaskedLM, - TFRoFormerForMultipleChoice, - TFRoFormerForQuestionAnswering, - TFRoFormerForSequenceClassification, - TFRoFormerForTokenClassification, - TFRoFormerLayer, - TFRoFormerModel, - TFRoFormerPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_roformer import ( - FlaxRoFormerForMaskedLM, - FlaxRoFormerForMultipleChoice, - FlaxRoFormerForQuestionAnswering, - FlaxRoFormerForSequenceClassification, - FlaxRoFormerForTokenClassification, - FlaxRoFormerModel, - FlaxRoFormerPreTrainedModel, - ) - - + from .configuration_roformer import * + from .modeling_flax_roformer import * + from .modeling_roformer import * + from .modeling_tf_roformer import * + from .tokenization_roformer import * + from .tokenization_roformer_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/roformer/configuration_roformer.py b/src/transformers/models/roformer/configuration_roformer.py index ae4ed0fd7b00..1852509199e6 100644 --- a/src/transformers/models/roformer/configuration_roformer.py +++ b/src/transformers/models/roformer/configuration_roformer.py @@ -145,3 +145,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["RoFormerConfig", "RoFormerOnnxConfig"] diff --git a/src/transformers/models/roformer/modeling_flax_roformer.py b/src/transformers/models/roformer/modeling_flax_roformer.py index f53a056c13af..9e33de0732aa 100644 --- a/src/transformers/models/roformer/modeling_flax_roformer.py +++ b/src/transformers/models/roformer/modeling_flax_roformer.py @@ -1078,3 +1078,13 @@ class FlaxRoFormerForQuestionAnswering(FlaxRoFormerPreTrainedModel): FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxRoFormerPreTrainedModel", + "FlaxRoFormerModel", + "FlaxRoFormerForMaskedLM", + "FlaxRoFormerForSequenceClassification", + "FlaxRoFormerForMultipleChoice", + "FlaxRoFormerForTokenClassification", + "FlaxRoFormerForQuestionAnswering", +] diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 69588ff743a0..e3061fb36cad 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -1564,3 +1564,16 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_roformer", + "RoFormerPreTrainedModel", + "RoFormerModel", + "RoFormerForMaskedLM", + "RoFormerForCausalLM", + "RoFormerForSequenceClassification", + "RoFormerForMultipleChoice", + "RoFormerForTokenClassification", + "RoFormerForQuestionAnswering", +] diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 20af18369194..dcb2677dde68 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -1532,3 +1532,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + 
+__all__ = [ + "TFRoFormerPreTrainedModel", + "TFRoFormerModel", + "TFRoFormerForMaskedLM", + "TFRoFormerForCausalLM", + "TFRoFormerForSequenceClassification", + "TFRoFormerForMultipleChoice", + "TFRoFormerForTokenClassification", + "TFRoFormerForQuestionAnswering", + "TFRoFormerMainLayer", +] diff --git a/src/transformers/models/roformer/tokenization_roformer.py b/src/transformers/models/roformer/tokenization_roformer.py index 33fe68f8225c..312cecca193b 100644 --- a/src/transformers/models/roformer/tokenization_roformer.py +++ b/src/transformers/models/roformer/tokenization_roformer.py @@ -535,3 +535,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = writer.write(token + "\n") index += 1 return (vocab_file,) + + +__all__ = ["RoFormerTokenizer"] diff --git a/src/transformers/models/roformer/tokenization_roformer_fast.py b/src/transformers/models/roformer/tokenization_roformer_fast.py index cc161c1a2679..7f75e5a2ff12 100644 --- a/src/transformers/models/roformer/tokenization_roformer_fast.py +++ b/src/transformers/models/roformer/tokenization_roformer_fast.py @@ -175,3 +175,6 @@ def save_pretrained( ): self.backend_tokenizer.pre_tokenizer = BertPreTokenizer() return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs) + + +__all__ = ["RoFormerTokenizerFast"] diff --git a/src/transformers/models/rwkv/__init__.py b/src/transformers/models/rwkv/__init__.py index 2cbfd94bac7b..a84cccfbb496 100644 --- a/src/transformers/models/rwkv/__init__.py +++ b/src/transformers/models/rwkv/__init__.py @@ -11,48 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_rwkv": ["RwkvConfig", "RwkvOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_rwkv"] = [ - "RwkvForCausalLM", - "RwkvModel", - "RwkvPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_rwkv import RwkvConfig, RwkvOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_rwkv import ( - RwkvForCausalLM, - RwkvModel, - RwkvPreTrainedModel, - ) + from .configuration_rwkv import * + from .modeling_rwkv import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/rwkv/configuration_rwkv.py b/src/transformers/models/rwkv/configuration_rwkv.py index 9539b857eac1..90c5cf7e1c8b 100644 --- a/src/transformers/models/rwkv/configuration_rwkv.py +++ b/src/transformers/models/rwkv/configuration_rwkv.py @@ -115,3 +115,6 @@ def __init__( super().__init__( tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs ) + + +__all__ = ["RwkvConfig"] diff --git a/src/transformers/models/rwkv/modeling_rwkv.py b/src/transformers/models/rwkv/modeling_rwkv.py index 7dec1f26e1a3..ba1c72974911 100644 --- a/src/transformers/models/rwkv/modeling_rwkv.py +++ b/src/transformers/models/rwkv/modeling_rwkv.py @@ -844,3 +844,6 @@ def forward( hidden_states=rwkv_outputs.hidden_states, attentions=rwkv_outputs.attentions, ) + + +__all__ = ["RwkvPreTrainedModel", "RwkvModel", "RwkvForCausalLM"] diff --git a/src/transformers/models/sam/__init__.py b/src/transformers/models/sam/__init__.py index 672281440c1a..4a6539977f2a 100644 --- a/src/transformers/models/sam/__init__.py +++ b/src/transformers/models/sam/__init__.py @@ -13,89 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_sam": [ - "SamConfig", - "SamMaskDecoderConfig", - "SamPromptEncoderConfig", - "SamVisionConfig", - ], - "processing_sam": ["SamProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_sam"] = [ - "SamModel", - "SamPreTrainedModel", - ] -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_sam"] = [ - "TFSamModel", - "TFSamPreTrainedModel", - ] -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_sam"] = ["SamImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_sam import ( - SamConfig, - SamMaskDecoderConfig, - SamPromptEncoderConfig, - SamVisionConfig, - ) - from .processing_sam import SamProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_sam import SamModel, SamPreTrainedModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_sam import TFSamModel, TFSamPreTrainedModel - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_sam import SamImageProcessor - + from .configuration_sam import * + from .image_processing_sam import * + from .modeling_sam import * + from .modeling_tf_sam import * + from .processing_sam import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/sam/configuration_sam.py b/src/transformers/models/sam/configuration_sam.py index b0045655d206..f05aa52c765b 100644 --- a/src/transformers/models/sam/configuration_sam.py +++ b/src/transformers/models/sam/configuration_sam.py @@ -303,3 +303,6 @@ def __init__( self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config) self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config) self.initializer_range = initializer_range + + +__all__ = ["SamPromptEncoderConfig", "SamMaskDecoderConfig", "SamVisionConfig", "SamConfig"] diff --git a/src/transformers/models/sam/image_processing_sam.py b/src/transformers/models/sam/image_processing_sam.py index beea3f4b01c3..60c3c23dcd3d 100644 --- a/src/transformers/models/sam/image_processing_sam.py +++ b/src/transformers/models/sam/image_processing_sam.py @@ -46,6 +46,7 @@ logging, requires_backends, ) +from ...utils.import_utils import export if is_torch_available(): @@ -64,6 +65,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class SamImageProcessor(BaseImageProcessor): r""" Constructs a SAM image processor. 
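The `@export` decorator appearing in this hunk is what replaces the old per-backend `try/except OptionalDependencyNotAvailable` gating: instead of a hand-maintained dict in `__init__.py` recording that `SamImageProcessor` needs vision, the requirement is declared on the class itself. Its implementation lives in `src/transformers/utils/import_utils.py` and is not shown in this part of the patch; a minimal sketch of the idea, in which the `_backends` marker attribute is an assumption rather than the real internals:

    def export(*, backends=()):
        # Tag an object with the optional backends it needs (e.g. ("vision",))
        # so the import machinery can bucket it when building the structure.
        def decorator(obj):
            obj._backends = backends
            return obj
        return decorator

The practical gain is locality: the backend requirement now sits next to the class definition instead of in a registry two directories away.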
@@ -1473,3 +1475,6 @@ def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thre masks = [_rle_to_mask(rle) for rle in rle_masks] return masks, iou_scores, rle_masks, mask_boxes + + +__all__ = ["SamImageProcessor"] diff --git a/src/transformers/models/sam/modeling_sam.py b/src/transformers/models/sam/modeling_sam.py index c99fb9d7e869..440794377de6 100644 --- a/src/transformers/models/sam/modeling_sam.py +++ b/src/transformers/models/sam/modeling_sam.py @@ -1410,3 +1410,6 @@ def forward( vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions, ) + + +__all__ = ["SamPreTrainedModel", "SamModel"] diff --git a/src/transformers/models/sam/modeling_tf_sam.py b/src/transformers/models/sam/modeling_tf_sam.py index 1e5099f191e9..abe486eb315b 100644 --- a/src/transformers/models/sam/modeling_tf_sam.py +++ b/src/transformers/models/sam/modeling_tf_sam.py @@ -1650,3 +1650,6 @@ def build(self, input_shape=None): if getattr(self, "mask_decoder", None) is not None: with tf.name_scope(self.mask_decoder.name): self.mask_decoder.build(None) + + +__all__ = ["TFSamPreTrainedModel", "TFSamModel"] diff --git a/src/transformers/models/sam/processing_sam.py b/src/transformers/models/sam/processing_sam.py index 9e67be1e1e55..5c6cb0132ce1 100644 --- a/src/transformers/models/sam/processing_sam.py +++ b/src/transformers/models/sam/processing_sam.py @@ -265,3 +265,6 @@ def model_input_names(self): def post_process_masks(self, *args, **kwargs): return self.image_processor.post_process_masks(*args, **kwargs) + + +__all__ = ["SamProcessor"] diff --git a/src/transformers/models/seamless_m4t/__init__.py b/src/transformers/models/seamless_m4t/__init__.py index 56b04e76b62c..2fd8bcbff45f 100644 --- a/src/transformers/models/seamless_m4t/__init__.py +++ b/src/transformers/models/seamless_m4t/__init__.py @@ -13,97 +13,19 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_seamless_m4t": ["SeamlessM4TConfig"], - "feature_extraction_seamless_m4t": ["SeamlessM4TFeatureExtractor"], - "processing_seamless_m4t": ["SeamlessM4TProcessor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_seamless_m4t"] = ["SeamlessM4TTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_seamless_m4t_fast"] = ["SeamlessM4TTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_seamless_m4t"] = [ - "SeamlessM4TForTextToSpeech", - "SeamlessM4TForSpeechToSpeech", - "SeamlessM4TForTextToText", - "SeamlessM4TForSpeechToText", - "SeamlessM4TModel", - "SeamlessM4TPreTrainedModel", - "SeamlessM4TCodeHifiGan", - "SeamlessM4THifiGan", - "SeamlessM4TTextToUnitForConditionalGeneration", - "SeamlessM4TTextToUnitModel", - ] - if TYPE_CHECKING: - from .configuration_seamless_m4t import SeamlessM4TConfig - from .feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor - from .processing_seamless_m4t import SeamlessM4TProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_seamless_m4t import SeamlessM4TTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_seamless_m4t_fast import SeamlessM4TTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_seamless_m4t import ( - SeamlessM4TCodeHifiGan, - SeamlessM4TForSpeechToSpeech, - SeamlessM4TForSpeechToText, - SeamlessM4TForTextToSpeech, - SeamlessM4TForTextToText, - SeamlessM4THifiGan, - SeamlessM4TModel, - SeamlessM4TPreTrainedModel, - SeamlessM4TTextToUnitForConditionalGeneration, - SeamlessM4TTextToUnitModel, - ) - + from .configuration_seamless_m4t import * + from .feature_extraction_seamless_m4t import * + from .modeling_seamless_m4t import * + from .processing_seamless_m4t import * + from .tokenization_seamless_m4t import * + from .tokenization_seamless_m4t_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py index c24eb0ecb64c..f406264b0308 100644 --- a/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/configuration_seamless_m4t.py @@ -411,3 +411,6 @@ def __init__( max_position_embeddings=max_position_embeddings, **kwargs, ) + + +__all__ = ["SeamlessM4TConfig"] diff 
--git a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py index 2a83e56fc0bd..08ea4ea08286 100644 --- a/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py @@ -304,3 +304,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["SeamlessM4TFeatureExtractor"] diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index a79d1d4cf2b9..17ef2098580a 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -4404,3 +4404,17 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past + + +__all__ = [ + "SeamlessM4TPreTrainedModel", + "SeamlessM4TTextToUnitModel", + "SeamlessM4TTextToUnitForConditionalGeneration", + "SeamlessM4TCodeHifiGan", + "SeamlessM4TForTextToText", + "SeamlessM4TForSpeechToText", + "SeamlessM4TForTextToSpeech", + "SeamlessM4TForSpeechToSpeech", + "SeamlessM4TModel", + "SeamlessM4THifiGan", +] diff --git a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py index 7e838913ca14..dd80b503eead 100644 --- a/src/transformers/models/seamless_m4t/processing_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/processing_seamless_m4t.py @@ -115,3 +115,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names)) + + +__all__ = ["SeamlessM4TProcessor"] diff --git a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py index d6017a6e0579..b28eebee5bbb 100644 --- a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py @@ -29,6 +29,7 @@ ) from ...tokenization_utils_base import AddedToken from ...utils import PaddingStrategy, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -40,6 +41,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} +@export(backends=("sentencepiece",)) class SeamlessM4TTokenizer(PreTrainedTokenizer): """ Construct a SeamlessM4T tokenizer. 
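These `@export(backends=("sentencepiece",))` markers and the per-module `__all__` lists only pay off because `define_import_structure`, called by every rewritten `__init__.py` above, can discover them. One constraint shapes that function: it has to read the sibling modules without importing them, since importing a `modeling_*.py` eagerly would both defeat the lazy loading and crash when an optional backend (torch, tf, flax, sentencepiece, ...) is absent. A simplified sketch under that assumption, collecting only `__all__` via static parsing and ignoring the backend grouping the real function performs:

    import ast
    from pathlib import Path

    def define_import_structure_sketch(init_file):
        structure = {}
        for module in Path(init_file).parent.glob("*.py"):
            if module.name == "__init__.py":
                continue
            tree = ast.parse(module.read_text(encoding="utf-8"))
            for node in tree.body:
                if isinstance(node, ast.Assign) and any(
                    isinstance(t, ast.Name) and t.id == "__all__" for t in node.targets
                ):
                    # Static read: nothing is executed, so a missing optional
                    # dependency inside the module cannot break the walk.
                    structure[module.stem] = ast.literal_eval(node.value)
        return structure

This is why the patch adds an explicit `__all__` to every config, modeling, tokenization, and processing file: the list is now a machine-read contract, not documentation.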
@@ -560,3 +562,6 @@ def set_tgt_lang_special_tokens(self, lang: str) -> None: self.prefix_tokens = [self.eos_token_id, self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] + + +__all__ = ["SeamlessM4TTokenizer"] diff --git a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py index 70892c9948b8..c1142c10719f 100644 --- a/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py +++ b/src/transformers/models/seamless_m4t/tokenization_seamless_m4t_fast.py @@ -445,3 +445,6 @@ def __call__( ) return output + + +__all__ = ["SeamlessM4TTokenizerFast"] diff --git a/src/transformers/models/seamless_m4t_v2/__init__.py b/src/transformers/models/seamless_m4t_v2/__init__.py index 5fde6a5d332a..276c9f0f4db2 100644 --- a/src/transformers/models/seamless_m4t_v2/__init__.py +++ b/src/transformers/models/seamless_m4t_v2/__init__.py @@ -13,51 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_seamless_m4t_v2": ["SeamlessM4Tv2Config"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_seamless_m4t_v2"] = [ - "SeamlessM4Tv2ForTextToSpeech", - "SeamlessM4Tv2ForSpeechToSpeech", - "SeamlessM4Tv2ForTextToText", - "SeamlessM4Tv2ForSpeechToText", - "SeamlessM4Tv2Model", - "SeamlessM4Tv2PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_seamless_m4t_v2 import ( - SeamlessM4Tv2ForSpeechToSpeech, - SeamlessM4Tv2ForSpeechToText, - SeamlessM4Tv2ForTextToSpeech, - SeamlessM4Tv2ForTextToText, - SeamlessM4Tv2Model, - SeamlessM4Tv2PreTrainedModel, - ) - + from .configuration_seamless_m4t_v2 import * + from .modeling_seamless_m4t_v2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py index 30082cd5fd87..d29eaf454010 100644 --- a/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py @@ -420,3 +420,6 @@ def __init__( max_position_embeddings=max_position_embeddings, **kwargs, ) + + +__all__ = ["SeamlessM4Tv2Config"] diff --git a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py index a53f544bb34f..40247f389ad1 100644 --- a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py @@ -4807,3 +4807,13 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past + + +__all__ = [ + 
"SeamlessM4Tv2PreTrainedModel", + "SeamlessM4Tv2ForTextToText", + "SeamlessM4Tv2ForSpeechToText", + "SeamlessM4Tv2ForTextToSpeech", + "SeamlessM4Tv2ForSpeechToSpeech", + "SeamlessM4Tv2Model", +] diff --git a/src/transformers/models/segformer/__init__.py b/src/transformers/models/segformer/__init__.py index 8d8cccdf39ff..5fd93babb423 100644 --- a/src/transformers/models/segformer/__init__.py +++ b/src/transformers/models/segformer/__init__.py @@ -13,97 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = {"configuration_segformer": ["SegformerConfig", "SegformerOnnxConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_segformer"] = ["SegformerFeatureExtractor"] - _import_structure["image_processing_segformer"] = ["SegformerImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_segformer"] = [ - "SegformerDecodeHead", - "SegformerForImageClassification", - "SegformerForSemanticSegmentation", - "SegformerLayer", - "SegformerModel", - "SegformerPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_segformer"] = [ - "TFSegformerDecodeHead", - "TFSegformerForImageClassification", - "TFSegformerForSemanticSegmentation", - "TFSegformerModel", - "TFSegformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_segformer import SegformerConfig, SegformerOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_segformer import SegformerFeatureExtractor - from .image_processing_segformer import SegformerImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_segformer import ( - SegformerDecodeHead, - SegformerForImageClassification, - SegformerForSemanticSegmentation, - SegformerLayer, - SegformerModel, - SegformerPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_segformer import ( - TFSegformerDecodeHead, - TFSegformerForImageClassification, - TFSegformerForSemanticSegmentation, - TFSegformerModel, - TFSegformerPreTrainedModel, - ) - + from .configuration_segformer import * + from .feature_extraction_segformer import * + from .image_processing_segformer import * + from .modeling_segformer import * + from .modeling_tf_segformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/segformer/configuration_segformer.py b/src/transformers/models/segformer/configuration_segformer.py index 28fc1a7334e9..58683a86c78a 100644 --- 
a/src/transformers/models/segformer/configuration_segformer.py +++ b/src/transformers/models/segformer/configuration_segformer.py @@ -166,3 +166,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["SegformerConfig", "SegformerOnnxConfig"] diff --git a/src/transformers/models/segformer/feature_extraction_segformer.py b/src/transformers/models/segformer/feature_extraction_segformer.py index 3c081e738906..2389fcde3980 100644 --- a/src/transformers/models/segformer/feature_extraction_segformer.py +++ b/src/transformers/models/segformer/feature_extraction_segformer.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_segformer import SegformerImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class SegformerFeatureExtractor(SegformerImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["SegformerFeatureExtractor"] diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py index da1c9be40a5e..ed71067bd327 100644 --- a/src/transformers/models/segformer/image_processing_segformer.py +++ b/src/transformers/models/segformer/image_processing_segformer.py @@ -42,6 +42,7 @@ logging, ) from ...utils.deprecation import deprecate_kwarg +from ...utils.import_utils import export if is_vision_available(): @@ -54,6 +55,7 @@ logger = logging.get_logger(__name__) +@export(backends=("vision",)) class SegformerImageProcessor(BaseImageProcessor): r""" Constructs a Segformer image processor. 
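The consumer of that structure is `_LazyModule`, which every rewritten `__init__.py` installs into `sys.modules` in place of itself. Nothing in this patch changes its contract, but since all of the diffs above lean on it, here is a compressed stand-in showing the runtime behaviour; the real class additionally handles `module_spec`, `__dir__`, and error reporting, and `structure` is assumed here to be a flat {submodule: [names]} mapping:

    import importlib
    import types

    class LazyModuleSketch(types.ModuleType):
        def __init__(self, name, structure):
            super().__init__(name)
            # Invert {submodule: [names]} into {name: submodule} for lookup.
            self._name_to_module = {
                symbol: submodule
                for submodule, symbols in structure.items()
                for symbol in symbols
            }

        def __getattr__(self, name):
            if name not in self._name_to_module:
                raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
            submodule = importlib.import_module("." + self._name_to_module[name], self.__name__)
            value = getattr(submodule, name)
            setattr(self, name, value)  # cache: later lookups skip __getattr__
            return value

So `from transformers.models.segformer import SegformerImageProcessor` imports `image_processing_segformer` on first access and nothing else, which is exactly why the per-module `__all__` lists above must stay accurate.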
@@ -477,3 +479,6 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation + + +__all__ = ["SegformerImageProcessor"] diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index 44582a74ccc9..230d67fd7211 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -826,3 +826,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "SegformerPreTrainedModel", + "SegformerModel", + "SegformerForImageClassification", + "SegformerDecodeHead", + "SegformerForSemanticSegmentation", +] diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index 4cd52e135edc..e6abe03cd480 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -1034,3 +1034,13 @@ def build(self, input_shape=None): if getattr(self, "decode_head", None) is not None: with tf.name_scope(self.decode_head.name): self.decode_head.build(None) + + +__all__ = [ + "TFSegformerPreTrainedModel", + "TFSegformerModel", + "TFSegformerForImageClassification", + "TFSegformerDecodeHead", + "TFSegformerForSemanticSegmentation", + "TFSegformerMainLayer", +] diff --git a/src/transformers/models/seggpt/__init__.py b/src/transformers/models/seggpt/__init__.py index b6095b53277a..ec646f2f81a2 100644 --- a/src/transformers/models/seggpt/__init__.py +++ b/src/transformers/models/seggpt/__init__.py @@ -13,55 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_seggpt": ["SegGptConfig", "SegGptOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_seggpt"] = [ - "SegGptModel", - "SegGptPreTrainedModel", - "SegGptForImageSegmentation", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"] - if TYPE_CHECKING: - from .configuration_seggpt import SegGptConfig, SegGptOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_seggpt import ( - SegGptForImageSegmentation, - SegGptModel, - SegGptPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_seggpt import SegGptImageProcessor - + from .configuration_seggpt import * + from .image_processing_seggpt import * + from .modeling_seggpt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/seggpt/configuration_seggpt.py b/src/transformers/models/seggpt/configuration_seggpt.py index f79e7f12b2ef..a735281faccc 100644 --- a/src/transformers/models/seggpt/configuration_seggpt.py +++ b/src/transformers/models/seggpt/configuration_seggpt.py @@ -138,3 +138,6 @@ def __init__( self.intermediate_hidden_state_indices = intermediate_hidden_state_indices self.beta = beta self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim + + +__all__ = ["SegGptConfig"] diff --git a/src/transformers/models/seggpt/image_processing_seggpt.py b/src/transformers/models/seggpt/image_processing_seggpt.py index 1e4a5e23d093..9512971342ef 100644 --- a/src/transformers/models/seggpt/image_processing_seggpt.py +++ b/src/transformers/models/seggpt/image_processing_seggpt.py @@ -33,6 +33,7 @@ valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging, requires_backends +from ...utils.import_utils import export if is_torch_available(): @@ -95,6 +96,7 @@ def mask_to_rgb( return to_channel_dimension_format(rgb_mask, data_format) +@export(backends=("vision",)) class SegGptImageProcessor(BaseImageProcessor): r""" Constructs a SegGpt image processor. 
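Two review observations on the `__all__` lists in this stretch of the patch. First, the public surface quietly shrinks in places: names the old `_import_structure` dicts advertised, such as `RoFormerLayer`, `SegformerLayer`, `RoCBertLayer`, `RwkvOnnxConfig`, and `SegGptOnnxConfig` here, appear in no new `__all__`, so they stop being importable from the package; conversely, `TFSegformerMainLayer` is newly exported. Presumably intentional, but these are API-visible changes worth calling out. Second, because `__all__` is now the single source of truth, a stale list becomes a bug in either direction; a quick sanity check (illustrative, not part of this patch) for the direction the lazy machinery cannot catch on its own:

    import importlib

    def assert_all_resolves(module_name):
        # Every name a module advertises in __all__ must actually be defined
        # there once the module is imported for real.
        module = importlib.import_module(module_name)
        missing = [n for n in getattr(module, "__all__", []) if not hasattr(module, n)]
        assert not missing, f"{module_name}: __all__ lists undefined names {missing}"

    assert_all_resolves("transformers.models.seggpt.modeling_seggpt")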
@@ -613,3 +615,6 @@ def post_process_semantic_segmentation( semantic_segmentation.append(pred) return semantic_segmentation + + +__all__ = ["SegGptImageProcessor"] diff --git a/src/transformers/models/seggpt/modeling_seggpt.py b/src/transformers/models/seggpt/modeling_seggpt.py index 174aeaad00ae..8c34bb9594bf 100644 --- a/src/transformers/models/seggpt/modeling_seggpt.py +++ b/src/transformers/models/seggpt/modeling_seggpt.py @@ -1020,3 +1020,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["SegGptPreTrainedModel", "SegGptModel", "SegGptForImageSegmentation"] diff --git a/src/transformers/models/sew/__init__.py b/src/transformers/models/sew/__init__.py index aba88cc45133..00cf73fc6e1d 100644 --- a/src/transformers/models/sew/__init__.py +++ b/src/transformers/models/sew/__init__.py @@ -13,42 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_sew": ["SEWConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_sew"] = [ - "SEWForCTC", - "SEWForSequenceClassification", - "SEWModel", - "SEWPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_sew import SEWConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_sew import ( - SEWForCTC, - SEWForSequenceClassification, - SEWModel, - SEWPreTrainedModel, - ) - - + from .configuration_sew import * + from .modeling_sew import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/sew/configuration_sew.py b/src/transformers/models/sew/configuration_sew.py index 6c877277aec2..4365e9c33ded 100644 --- a/src/transformers/models/sew/configuration_sew.py +++ b/src/transformers/models/sew/configuration_sew.py @@ -251,3 +251,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["SEWConfig"] diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py index 191d7c2cd8c6..00d91179022e 100644 --- a/src/transformers/models/sew/modeling_sew.py +++ b/src/transformers/models/sew/modeling_sew.py @@ -1489,3 +1489,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["SEWPreTrainedModel", "SEWModel", "SEWForCTC", "SEWForSequenceClassification"] diff --git a/src/transformers/models/sew_d/__init__.py b/src/transformers/models/sew_d/__init__.py index c99be845d544..eb3ffb1931f8 100644 --- a/src/transformers/models/sew_d/__init__.py +++ b/src/transformers/models/sew_d/__init__.py @@ -13,42 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_sew_d": ["SEWDConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_sew_d"] = [ - "SEWDForCTC", - "SEWDForSequenceClassification", - "SEWDModel", - "SEWDPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_sew_d import SEWDConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_sew_d import ( - SEWDForCTC, - SEWDForSequenceClassification, - SEWDModel, - SEWDPreTrainedModel, - ) - - + from .configuration_sew_d import * + from .modeling_sew_d import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/sew_d/configuration_sew_d.py b/src/transformers/models/sew_d/configuration_sew_d.py index ea791935ba60..f03a8f43c1a2 100644 --- a/src/transformers/models/sew_d/configuration_sew_d.py +++ b/src/transformers/models/sew_d/configuration_sew_d.py @@ -286,3 +286,6 @@ def to_dict(self): output = super().to_dict() output["hidden_dropout"] = output.pop("_hidden_dropout") return output + + +__all__ = ["SEWDConfig"] diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index a617f5e5d64e..a007907f6073 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -1752,3 +1752,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["SEWDPreTrainedModel", "SEWDModel", "SEWDForCTC", "SEWDForSequenceClassification"] diff --git a/src/transformers/models/siglip/__init__.py b/src/transformers/models/siglip/__init__.py index 96ce20e7f230..c0618262afbb 100644 --- a/src/transformers/models/siglip/__init__.py +++ b/src/transformers/models/siglip/__init__.py @@ -13,96 +13,18 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_torch_available, - is_vision_available, -) - - -_import_structure = { - "configuration_siglip": [ - "SiglipConfig", - "SiglipTextConfig", - "SiglipVisionConfig", - ], - "processing_siglip": ["SiglipProcessor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_siglip"] = ["SiglipTokenizer"] - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_siglip"] = ["SiglipImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_siglip"] = [ - "SiglipModel", - "SiglipPreTrainedModel", - "SiglipTextModel", - "SiglipVisionModel", - "SiglipForImageClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_siglip import ( - SiglipConfig, - SiglipTextConfig, - SiglipVisionConfig, - ) - from .processing_siglip import SiglipProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_siglip import SiglipTokenizer - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_siglip import SiglipImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_siglip import ( - SiglipForImageClassification, - SiglipModel, - SiglipPreTrainedModel, - SiglipTextModel, - SiglipVisionModel, - ) - - + from .configuration_siglip import * + from .image_processing_siglip import * + from .modeling_siglip import * + from .processing_siglip import * + from .tokenization_siglip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/siglip/configuration_siglip.py b/src/transformers/models/siglip/configuration_siglip.py index 73622373cbab..ee1fc086e1f7 100644 --- a/src/transformers/models/siglip/configuration_siglip.py +++ b/src/transformers/models/siglip/configuration_siglip.py @@ -296,3 +296,6 @@ def from_text_vision_configs(cls, text_config: SiglipTextConfig, vision_config: """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["SiglipTextConfig", "SiglipVisionConfig", "SiglipConfig"] diff --git a/src/transformers/models/siglip/image_processing_siglip.py b/src/transformers/models/siglip/image_processing_siglip.py index 5bbeeb74c8f1..497a70f5d951 100644 --- a/src/transformers/models/siglip/image_processing_siglip.py +++ b/src/transformers/models/siglip/image_processing_siglip.py @@ -36,6 +36,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export logger = 
logging.get_logger(__name__) @@ -45,6 +46,7 @@ import PIL +@export(backends=("vision",)) class SiglipImageProcessor(BaseImageProcessor): r""" Constructs a SigLIP image processor. @@ -239,3 +241,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["SiglipImageProcessor"] diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 1d35d1d44cfd..66cb71ba2393 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -1567,3 +1567,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "SiglipPreTrainedModel", + "SiglipTextModel", + "SiglipVisionModel", + "SiglipModel", + "SiglipForImageClassification", +] diff --git a/src/transformers/models/siglip/processing_siglip.py b/src/transformers/models/siglip/processing_siglip.py index 655fb4d4f78a..fd89287fc3f4 100644 --- a/src/transformers/models/siglip/processing_siglip.py +++ b/src/transformers/models/siglip/processing_siglip.py @@ -140,3 +140,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["SiglipProcessor"] diff --git a/src/transformers/models/siglip/tokenization_siglip.py b/src/transformers/models/siglip/tokenization_siglip.py index 6203c6887054..11dd0e959ebb 100644 --- a/src/transformers/models/siglip/tokenization_siglip.py +++ b/src/transformers/models/siglip/tokenization_siglip.py @@ -26,6 +26,7 @@ from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken +from ...utils.import_utils import export if TYPE_CHECKING: @@ -41,6 +42,7 @@ SPIECE_UNDERLINE = "▁" +@export(backends=("sentencepiece",)) class SiglipTokenizer(PreTrainedTokenizer): """ Construct a Siglip tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -373,3 +375,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["SiglipTokenizer"] diff --git a/src/transformers/models/speech_encoder_decoder/__init__.py b/src/transformers/models/speech_encoder_decoder/__init__.py index 392f21296e72..4dc503b22fa9 100644 --- a/src/transformers/models/speech_encoder_decoder/__init__.py +++ b/src/transformers/models/speech_encoder_decoder/__init__.py @@ -11,50 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
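Backend-gated classes such as SiglipImageProcessor and SiglipTokenizer above are now tagged at their definition site with @export(backends=...) rather than being wrapped in OptionalDependencyNotAvailable blocks in the package __init__.py. A minimal sketch of such a marker decorator, assuming it only records metadata for the structure parser to pick up; the __backends__ attribute name is invented for this illustration and is not necessarily what transformers.utils.import_utils.export does internally:

# Sketch of a marker decorator in the spirit of @export; the real one may
# differ. `__backends__` is a made-up attribute used only for illustration.
def export(*, backends: tuple = ()):
    def decorator(obj):
        # Record the declared backend requirements without otherwise
        # changing the decorated class or function.
        obj.__backends__ = tuple(backends)
        return obj
    return decorator


@export(backends=("vision",))
class ExampleImageProcessor:
    """Hypothetical class importable only when the vision extras are installed."""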
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"] if TYPE_CHECKING: - from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel - + from .configuration_speech_encoder_decoder import * + from .modeling_flax_speech_encoder_decoder import * + from .modeling_speech_encoder_decoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py index 32a58ec5589e..5ffb92699e2b 100644 --- a/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py @@ -106,3 +106,6 @@ def from_encoder_decoder_configs( decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) + + +__all__ = ["SpeechEncoderDecoderConfig"] diff --git a/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py index 2a15714cff9e..fd837146d5ca 100644 --- a/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/modeling_flax_speech_encoder_decoder.py @@ -924,3 +924,6 @@ def from_encoder_decoder_pretrained( model.params["decoder"] = decoder.params return model + + +__all__ = ["FlaxSpeechEncoderDecoderModel"] diff --git a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py index c2f5dd025909..7953df2a5b48 100644 --- a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py @@ -598,3 +598,6 @@ def resize_token_embeddings(self, *args, **kwargs): def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return 
self.decoder._reorder_cache(past_key_values, beam_idx) + + +__all__ = ["SpeechEncoderDecoderModel"] diff --git a/src/transformers/models/speech_to_text/__init__.py b/src/transformers/models/speech_to_text/__init__.py index 4ad05da69710..012b1c878d76 100644 --- a/src/transformers/models/speech_to_text/__init__.py +++ b/src/transformers/models/speech_to_text/__init__.py @@ -13,92 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_speech_to_text": ["Speech2TextConfig"], - "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"], - "processing_speech_to_text": ["Speech2TextProcessor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_speech_to_text"] = [ - "TFSpeech2TextForConditionalGeneration", - "TFSpeech2TextModel", - "TFSpeech2TextPreTrainedModel", - ] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_speech_to_text"] = [ - "Speech2TextForConditionalGeneration", - "Speech2TextModel", - "Speech2TextPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_speech_to_text import Speech2TextConfig - from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor - from .processing_speech_to_text import Speech2TextProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_speech_to_text import Speech2TextTokenizer - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_speech_to_text import ( - TFSpeech2TextForConditionalGeneration, - TFSpeech2TextModel, - TFSpeech2TextPreTrainedModel, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_speech_to_text import ( - Speech2TextForConditionalGeneration, - Speech2TextModel, - Speech2TextPreTrainedModel, - ) - + from .configuration_speech_to_text import * + from .feature_extraction_speech_to_text import * + from .modeling_speech_to_text import * + from .modeling_tf_speech_to_text import * + from .processing_speech_to_text import * + from .tokenization_speech_to_text import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/speech_to_text/configuration_speech_to_text.py b/src/transformers/models/speech_to_text/configuration_speech_to_text.py index 80602e9a7d8e..fef4069e4e41 100644 --- a/src/transformers/models/speech_to_text/configuration_speech_to_text.py +++ 
b/src/transformers/models/speech_to_text/configuration_speech_to_text.py @@ -194,3 +194,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["Speech2TextConfig"] diff --git a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py index b8a2b6bfb297..56a0b859db45 100644 --- a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py +++ b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py @@ -295,3 +295,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["Speech2TextFeatureExtractor"] diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index 8353a172b212..918a8c33205a 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -1365,3 +1365,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["Speech2TextPreTrainedModel", "Speech2TextModel", "Speech2TextForConditionalGeneration"] diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index bac1256ca4b6..b6e3346b69ad 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -1601,3 +1601,11 @@ def tf_to_pt_weight_rename(self, tf_weight): return tf_weight, "model.decoder.embed_tokens.weight" else: return (tf_weight,) + + +__all__ = [ + "TFSpeech2TextPreTrainedModel", + "TFSpeech2TextModel", + "TFSpeech2TextForConditionalGeneration", + "TFSpeech2TextMainLayer", +] diff --git a/src/transformers/models/speech_to_text/processing_speech_to_text.py b/src/transformers/models/speech_to_text/processing_speech_to_text.py index 646b38999454..57eb1f9cb950 100644 --- a/src/transformers/models/speech_to_text/processing_speech_to_text.py +++ b/src/transformers/models/speech_to_text/processing_speech_to_text.py @@ -115,3 +115,6 @@ def as_target_processor(self): yield self.current_processor = self.feature_extractor self._in_target_context_manager = False + + +__all__ = ["Speech2TextProcessor"] diff --git a/src/transformers/models/speech_to_text/tokenization_speech_to_text.py b/src/transformers/models/speech_to_text/tokenization_speech_to_text.py index 1b9841f0cfb7..f89459c6305a 100644 --- a/src/transformers/models/speech_to_text/tokenization_speech_to_text.py +++ b/src/transformers/models/speech_to_text/tokenization_speech_to_text.py @@ -24,6 +24,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -45,6 +46,7 @@ LANGUAGES = {"mustc": MUSTC_LANGS} +@export(backends=("sentencepiece",)) class Speech2TextTokenizer(PreTrainedTokenizer): """ Construct a Speech2Text tokenizer. 
@@ -288,3 +290,6 @@ def load_json(path: str) -> Union[Dict, List]: def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2) + + +__all__ = ["Speech2TextTokenizer"] diff --git a/src/transformers/models/speecht5/__init__.py b/src/transformers/models/speecht5/__init__.py index f9afe52aa4b7..3ea46a9df971 100644 --- a/src/transformers/models/speecht5/__init__.py +++ b/src/transformers/models/speecht5/__init__.py @@ -13,78 +13,18 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_speecht5": [ - "SpeechT5Config", - "SpeechT5HifiGanConfig", - ], - "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], - "processing_speecht5": ["SpeechT5Processor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_speecht5"] = [ - "SpeechT5ForSpeechToText", - "SpeechT5ForSpeechToSpeech", - "SpeechT5ForTextToSpeech", - "SpeechT5Model", - "SpeechT5PreTrainedModel", - "SpeechT5HifiGan", - ] - if TYPE_CHECKING: - from .configuration_speecht5 import ( - SpeechT5Config, - SpeechT5HifiGanConfig, - ) - from .feature_extraction_speecht5 import SpeechT5FeatureExtractor - from .processing_speecht5 import SpeechT5Processor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_speecht5 import SpeechT5Tokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_speecht5 import ( - SpeechT5ForSpeechToSpeech, - SpeechT5ForSpeechToText, - SpeechT5ForTextToSpeech, - SpeechT5HifiGan, - SpeechT5Model, - SpeechT5PreTrainedModel, - ) - + from .configuration_speecht5 import * + from .feature_extraction_speecht5 import * + from .modeling_speecht5 import * + from .processing_speecht5 import * + from .tokenization_speecht5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/speecht5/configuration_speecht5.py b/src/transformers/models/speecht5/configuration_speecht5.py index d8f4497de8f7..6f79bdbc61d3 100644 --- a/src/transformers/models/speecht5/configuration_speecht5.py +++ b/src/transformers/models/speecht5/configuration_speecht5.py @@ -417,3 +417,6 @@ def __init__( self.leaky_relu_slope = leaky_relu_slope self.normalize_before = normalize_before super().__init__(**kwargs) + + +__all__ = ["SpeechT5Config", "SpeechT5HifiGanConfig"] diff --git a/src/transformers/models/speecht5/feature_extraction_speecht5.py b/src/transformers/models/speecht5/feature_extraction_speecht5.py index 84d51e97df95..aea75ca50b99 100644 --- a/src/transformers/models/speecht5/feature_extraction_speecht5.py +++ 
b/src/transformers/models/speecht5/feature_extraction_speecht5.py @@ -391,3 +391,6 @@ def to_dict(self) -> Dict[str, Any]: del output[name] return output + + +__all__ = ["SpeechT5FeatureExtractor"] diff --git a/src/transformers/models/speecht5/modeling_speecht5.py b/src/transformers/models/speecht5/modeling_speecht5.py index a69e9b56ebc5..fd8df7d50388 100644 --- a/src/transformers/models/speecht5/modeling_speecht5.py +++ b/src/transformers/models/speecht5/modeling_speecht5.py @@ -3371,3 +3371,13 @@ def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor: waveform = hidden_states.squeeze(1) return waveform + + +__all__ = [ + "SpeechT5PreTrainedModel", + "SpeechT5Model", + "SpeechT5ForSpeechToText", + "SpeechT5ForTextToSpeech", + "SpeechT5ForSpeechToSpeech", + "SpeechT5HifiGan", +] diff --git a/src/transformers/models/speecht5/processing_speecht5.py b/src/transformers/models/speecht5/processing_speecht5.py index 468a0c1d89ab..0c038d97ae8c 100644 --- a/src/transformers/models/speecht5/processing_speecht5.py +++ b/src/transformers/models/speecht5/processing_speecht5.py @@ -181,3 +181,6 @@ def decode(self, *args, **kwargs): the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["SpeechT5Processor"] diff --git a/src/transformers/models/speecht5/tokenization_speecht5.py b/src/transformers/models/speecht5/tokenization_speecht5.py index 97b2feaab3cc..847eec4b1128 100644 --- a/src/transformers/models/speecht5/tokenization_speecht5.py +++ b/src/transformers/models/speecht5/tokenization_speecht5.py @@ -22,6 +22,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export from .number_normalizer import EnglishNumberNormalizer @@ -30,6 +31,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"} +@export(backends=("sentencepiece",)) class SpeechT5Tokenizer(PreTrainedTokenizer): """ Construct a SpeechT5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -216,3 +218,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["SpeechT5Tokenizer"] diff --git a/src/transformers/models/splinter/__init__.py b/src/transformers/models/splinter/__init__.py index 81896fb15a5b..4614a9823525 100644 --- a/src/transformers/models/splinter/__init__.py +++ b/src/transformers/models/splinter/__init__.py @@ -13,65 +13,17 @@ # limitations under the License. 
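The star imports that now make up the TYPE_CHECKING branches are safe precisely because every touched module gains an explicit __all__: from .module import * re-exports exactly the names in that list and nothing else. A small self-contained example of the gating behaviour; the module and class names are hypothetical:

# hypothetical_module.py -- illustrative file following the convention this patch establishes
class PublicModel:
    """Listed in __all__, so a star import re-exports it."""


class InternalHelper:
    """Deliberately omitted from __all__, so a star import skips it."""


__all__ = ["PublicModel"]

# In a consuming package __init__.py (as in the TYPE_CHECKING branches above):
#     from .hypothetical_module import *
# binds PublicModel but leaves InternalHelper hidden.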
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_splinter": ["SplinterConfig"], - "tokenization_splinter": ["SplinterTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_splinter_fast"] = ["SplinterTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_splinter"] = [ - "SplinterForQuestionAnswering", - "SplinterForPreTraining", - "SplinterLayer", - "SplinterModel", - "SplinterPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_splinter import SplinterConfig - from .tokenization_splinter import SplinterTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_splinter_fast import SplinterTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_splinter import ( - SplinterForPreTraining, - SplinterForQuestionAnswering, - SplinterLayer, - SplinterModel, - SplinterPreTrainedModel, - ) - - + from .configuration_splinter import * + from .modeling_splinter import * + from .tokenization_splinter import * + from .tokenization_splinter_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/splinter/configuration_splinter.py b/src/transformers/models/splinter/configuration_splinter.py index 9a946fd4bedb..533b067ed34f 100644 --- a/src/transformers/models/splinter/configuration_splinter.py +++ b/src/transformers/models/splinter/configuration_splinter.py @@ -118,3 +118,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.question_token_id = question_token_id + + +__all__ = ["SplinterConfig"] diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py index 6494a57fa4fc..99f1b5f90a10 100755 --- a/src/transformers/models/splinter/modeling_splinter.py +++ b/src/transformers/models/splinter/modeling_splinter.py @@ -1105,3 +1105,6 @@ def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor: cols = torch.cat([torch.arange(n) for n in num_questions]) positions[rows, cols] = flat_positions return positions + + +__all__ = ["SplinterPreTrainedModel", "SplinterModel", "SplinterForQuestionAnswering", "SplinterForPreTraining"] diff --git a/src/transformers/models/splinter/tokenization_splinter.py b/src/transformers/models/splinter/tokenization_splinter.py index 2859497ba882..408a733e86de 100644 --- a/src/transformers/models/splinter/tokenization_splinter.py +++ b/src/transformers/models/splinter/tokenization_splinter.py @@ -501,3 +501,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["SplinterTokenizer"] diff --git 
a/src/transformers/models/splinter/tokenization_splinter_fast.py b/src/transformers/models/splinter/tokenization_splinter_fast.py index 0371fdf2828e..85dd01a2be03 100644 --- a/src/transformers/models/splinter/tokenization_splinter_fast.py +++ b/src/transformers/models/splinter/tokenization_splinter_fast.py @@ -188,3 +188,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["SplinterTokenizerFast"] diff --git a/src/transformers/models/squeezebert/__init__.py b/src/transformers/models/squeezebert/__init__.py index 45aff2f64c16..d2876f764662 100644 --- a/src/transformers/models/squeezebert/__init__.py +++ b/src/transformers/models/squeezebert/__init__.py @@ -11,79 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_squeezebert": [ - "SqueezeBertConfig", - "SqueezeBertOnnxConfig", - ], - "tokenization_squeezebert": ["SqueezeBertTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_squeezebert"] = [ - "SqueezeBertForMaskedLM", - "SqueezeBertForMultipleChoice", - "SqueezeBertForQuestionAnswering", - "SqueezeBertForSequenceClassification", - "SqueezeBertForTokenClassification", - "SqueezeBertModel", - "SqueezeBertModule", - "SqueezeBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_squeezebert import ( - SqueezeBertConfig, - SqueezeBertOnnxConfig, - ) - from .tokenization_squeezebert import SqueezeBertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_squeezebert import ( - SqueezeBertForMaskedLM, - SqueezeBertForMultipleChoice, - SqueezeBertForQuestionAnswering, - SqueezeBertForSequenceClassification, - SqueezeBertForTokenClassification, - SqueezeBertModel, - SqueezeBertModule, - SqueezeBertPreTrainedModel, - ) - + from .configuration_squeezebert import * + from .modeling_squeezebert import * + from .tokenization_squeezebert import * + from .tokenization_squeezebert_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/squeezebert/configuration_squeezebert.py b/src/transformers/models/squeezebert/configuration_squeezebert.py index 
1f3753ac5c08..a65951754553 100644 --- a/src/transformers/models/squeezebert/configuration_squeezebert.py +++ b/src/transformers/models/squeezebert/configuration_squeezebert.py @@ -162,3 +162,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["SqueezeBertConfig", "SqueezeBertOnnxConfig"] diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py index 483bac01bd9e..f97073b46d47 100644 --- a/src/transformers/models/squeezebert/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -1085,3 +1085,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "SqueezeBertPreTrainedModel", + "SqueezeBertModel", + "SqueezeBertForMaskedLM", + "SqueezeBertForSequenceClassification", + "SqueezeBertForMultipleChoice", + "SqueezeBertForTokenClassification", + "SqueezeBertForQuestionAnswering", +] diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert.py b/src/transformers/models/squeezebert/tokenization_squeezebert.py index 191e57c0f8af..f8196d3654c9 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert.py @@ -501,3 +501,6 @@ def tokenize(self, text): else: output_tokens.extend(sub_tokens) return output_tokens + + +__all__ = ["SqueezeBertTokenizer"] diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py index 985fe657f0c3..a908dcbf146b 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py @@ -171,3 +171,6 @@ def create_token_type_ids_from_sequences( def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) + + +__all__ = ["SqueezeBertTokenizerFast"] diff --git a/src/transformers/models/stablelm/__init__.py b/src/transformers/models/stablelm/__init__.py index c00c045f7f81..1ba6404be683 100644 --- a/src/transformers/models/stablelm/__init__.py +++ b/src/transformers/models/stablelm/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_stablelm": ["StableLmConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_stablelm"] = [ - "StableLmForCausalLM", - "StableLmModel", - "StableLmPreTrainedModel", - "StableLmForSequenceClassification", - "StableLmForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_stablelm import StableLmConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_stablelm import ( - StableLmForCausalLM, - StableLmForSequenceClassification, - StableLmForTokenClassification, - StableLmModel, - StableLmPreTrainedModel, - ) - - + from .configuration_stablelm import * + from .modeling_stablelm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/stablelm/configuration_stablelm.py b/src/transformers/models/stablelm/configuration_stablelm.py index c05ac9f036d6..c9065791af13 100644 --- a/src/transformers/models/stablelm/configuration_stablelm.py +++ b/src/transformers/models/stablelm/configuration_stablelm.py @@ -183,3 +183,6 @@ def _rope_scaling_validation(self): ) if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") + + +__all__ = ["StableLmConfig"] diff --git a/src/transformers/models/stablelm/modeling_stablelm.py b/src/transformers/models/stablelm/modeling_stablelm.py index 00b73af8948e..556f166c970e 100755 --- a/src/transformers/models/stablelm/modeling_stablelm.py +++ b/src/transformers/models/stablelm/modeling_stablelm.py @@ -1524,3 +1524,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "StableLmPreTrainedModel", + "StableLmModel", + "StableLmForCausalLM", + "StableLmForSequenceClassification", + "StableLmForTokenClassification", +] diff --git a/src/transformers/models/starcoder2/__init__.py b/src/transformers/models/starcoder2/__init__.py index d9dc2cd1e500..f6ab2aa4e828 100644 --- a/src/transformers/models/starcoder2/__init__.py +++ b/src/transformers/models/starcoder2/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_starcoder2": ["Starcoder2Config"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_starcoder2"] = [ - "Starcoder2ForCausalLM", - "Starcoder2Model", - "Starcoder2PreTrainedModel", - "Starcoder2ForSequenceClassification", - "Starcoder2ForTokenClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_starcoder2 import Starcoder2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_starcoder2 import ( - Starcoder2ForCausalLM, - Starcoder2ForSequenceClassification, - Starcoder2ForTokenClassification, - Starcoder2Model, - Starcoder2PreTrainedModel, - ) - - + from .configuration_starcoder2 import * + from .modeling_starcoder2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/starcoder2/configuration_starcoder2.py b/src/transformers/models/starcoder2/configuration_starcoder2.py index 2329f0a0a6b4..9cb0584d38ee 100644 --- a/src/transformers/models/starcoder2/configuration_starcoder2.py +++ b/src/transformers/models/starcoder2/configuration_starcoder2.py @@ -143,3 +143,6 @@ def __init__( eos_token_id=eos_token_id, **kwargs, ) + + +__all__ = ["Starcoder2Config"] diff --git a/src/transformers/models/starcoder2/modeling_starcoder2.py b/src/transformers/models/starcoder2/modeling_starcoder2.py index c9a81a36f749..71930598954f 100644 --- a/src/transformers/models/starcoder2/modeling_starcoder2.py +++ b/src/transformers/models/starcoder2/modeling_starcoder2.py @@ -674,6 +674,7 @@ def forward( "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.", STARCODER2_START_DOCSTRING, ) + # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2PreTrainedModel with Qwen2->Starcoder2 class Starcoder2PreTrainedModel(PreTrainedModel): config_class = Starcoder2Config @@ -1412,3 +1413,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Starcoder2PreTrainedModel", + "Starcoder2Model", + "Starcoder2ForCausalLM", + "Starcoder2ForSequenceClassification", + "Starcoder2ForTokenClassification", +] diff --git a/src/transformers/models/superpoint/__init__.py b/src/transformers/models/superpoint/__init__.py index 90cde651ea0a..aab40abaa86d 100644 --- a/src/transformers/models/superpoint/__init__.py +++ b/src/transformers/models/superpoint/__init__.py @@ -13,57 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_superpoint": ["SuperPointConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_superpoint"] = ["SuperPointImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_superpoint"] = [ - "SuperPointForKeypointDetection", - "SuperPointPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_superpoint import ( - SuperPointConfig, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_superpoint import SuperPointImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_superpoint import ( - SuperPointForKeypointDetection, - SuperPointPreTrainedModel, - ) - + from .configuration_superpoint import * + from .image_processing_superpoint import * + from .modeling_superpoint import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/superpoint/configuration_superpoint.py b/src/transformers/models/superpoint/configuration_superpoint.py index ac97b0aa8f42..82104e682bcb 100644 --- a/src/transformers/models/superpoint/configuration_superpoint.py +++ b/src/transformers/models/superpoint/configuration_superpoint.py @@ -85,3 +85,6 @@ def __init__( self.initializer_range = initializer_range super().__init__(**kwargs) + + +__all__ = ["SuperPointConfig"] diff --git a/src/transformers/models/superpoint/image_processing_superpoint.py b/src/transformers/models/superpoint/image_processing_superpoint.py index fbbb717570cb..59c514fc15e4 100644 --- a/src/transformers/models/superpoint/image_processing_superpoint.py +++ b/src/transformers/models/superpoint/image_processing_superpoint.py @@ -17,7 +17,6 @@ import numpy as np -from ... import is_vision_available from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( @@ -30,6 +29,7 @@ valid_images, ) from ...utils import TensorType, logging, requires_backends +from ...utils.import_utils import export, is_vision_available if is_vision_available(): @@ -84,6 +84,7 @@ def convert_to_grayscale( return image +@export(backends=("vision",)) class SuperPointImageProcessor(BaseImageProcessor): r""" Constructs a SuperPoint image processor. 
@@ -270,3 +271,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["SuperPointImageProcessor"] diff --git a/src/transformers/models/superpoint/modeling_superpoint.py b/src/transformers/models/superpoint/modeling_superpoint.py index cfd3dfd86e8e..a386b10e14fb 100644 --- a/src/transformers/models/superpoint/modeling_superpoint.py +++ b/src/transformers/models/superpoint/modeling_superpoint.py @@ -497,3 +497,6 @@ def forward( mask=mask, hidden_states=hidden_states, ) + + +__all__ = ["SuperPointPreTrainedModel", "SuperPointForKeypointDetection"] diff --git a/src/transformers/models/swiftformer/__init__.py b/src/transformers/models/swiftformer/__init__.py index 2f5dcc811dde..75ac195746a3 100644 --- a/src/transformers/models/swiftformer/__init__.py +++ b/src/transformers/models/swiftformer/__init__.py @@ -13,75 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_swiftformer": [ - "SwiftFormerConfig", - "SwiftFormerOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swiftformer"] = [ - "SwiftFormerForImageClassification", - "SwiftFormerModel", - "SwiftFormerPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_swiftformer"] = [ - "TFSwiftFormerForImageClassification", - "TFSwiftFormerModel", - "TFSwiftFormerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_swiftformer import ( - SwiftFormerConfig, - SwiftFormerOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swiftformer import ( - SwiftFormerForImageClassification, - SwiftFormerModel, - SwiftFormerPreTrainedModel, - ) - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_swiftformer import ( - TFSwiftFormerForImageClassification, - TFSwiftFormerModel, - TFSwiftFormerPreTrainedModel, - ) - + from .configuration_swiftformer import * + from .modeling_swiftformer import * + from .modeling_tf_swiftformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swiftformer/configuration_swiftformer.py b/src/transformers/models/swiftformer/configuration_swiftformer.py index abfdf5165271..00a0aaddfa24 100644 --- a/src/transformers/models/swiftformer/configuration_swiftformer.py +++ b/src/transformers/models/swiftformer/configuration_swiftformer.py @@ -143,3 +143,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["SwiftFormerConfig", "SwiftFormerOnnxConfig"] diff --git a/src/transformers/models/swiftformer/modeling_swiftformer.py 
b/src/transformers/models/swiftformer/modeling_swiftformer.py index bd86c3d7173e..32c8f7331317 100644 --- a/src/transformers/models/swiftformer/modeling_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_swiftformer.py @@ -602,3 +602,6 @@ def forward( logits=logits, hidden_states=outputs.hidden_states, ) + + +__all__ = ["SwiftFormerModel", "SwiftFormerForImageClassification", "SwiftFormerPreTrainedModel"] diff --git a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py index 3f1d19e9e33f..52be2cd3849f 100644 --- a/src/transformers/models/swiftformer/modeling_tf_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_tf_swiftformer.py @@ -861,3 +861,11 @@ def build(self, input_shape=None): with tf.name_scope(self.dist_head.name): self.dist_head.build(self.config.embed_dims[-1]) self.built = True + + +__all__ = [ + "TFSwiftFormerPreTrainedModel", + "TFSwiftFormerModel", + "TFSwiftFormerForImageClassification", + "TFSwiftFormerMainLayer", +] diff --git a/src/transformers/models/swin/__init__.py b/src/transformers/models/swin/__init__.py index a3458fe1efb8..bcc2c035d8ce 100644 --- a/src/transformers/models/swin/__init__.py +++ b/src/transformers/models/swin/__init__.py @@ -13,70 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_swin": ["SwinConfig", "SwinOnnxConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swin"] = [ - "SwinForImageClassification", - "SwinForMaskedImageModeling", - "SwinModel", - "SwinPreTrainedModel", - "SwinBackbone", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_swin"] = [ - "TFSwinForImageClassification", - "TFSwinForMaskedImageModeling", - "TFSwinModel", - "TFSwinPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_swin import SwinConfig, SwinOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swin import ( - SwinBackbone, - SwinForImageClassification, - SwinForMaskedImageModeling, - SwinModel, - SwinPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_swin import ( - TFSwinForImageClassification, - TFSwinForMaskedImageModeling, - TFSwinModel, - TFSwinPreTrainedModel, - ) - + from .configuration_swin import * + from .modeling_swin import * + from .modeling_tf_swin import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index 321648f14930..da6ba9871407 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -174,3 +174,6 @@ def 
inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["SwinConfig", "SwinOnnxConfig"] diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index 45383a36d9be..63d110c26750 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -1413,3 +1413,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "SwinPreTrainedModel", + "SwinModel", + "SwinForMaskedImageModeling", + "SwinForImageClassification", + "SwinBackbone", +] diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index 035b31e8d43b..3c5c15500a16 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -1625,3 +1625,12 @@ def build(self, input_shape=None): if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.swin.num_features]) + + +__all__ = [ + "TFSwinPreTrainedModel", + "TFSwinModel", + "TFSwinForMaskedImageModeling", + "TFSwinForImageClassification", + "TFSwinMainLayer", +] diff --git a/src/transformers/models/swin2sr/__init__.py b/src/transformers/models/swin2sr/__init__.py index 16495f1dc971..512f70032d00 100644 --- a/src/transformers/models/swin2sr/__init__.py +++ b/src/transformers/models/swin2sr/__init__.py @@ -13,61 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_swin2sr": ["Swin2SRConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swin2sr"] = [ - "Swin2SRForImageSuperResolution", - "Swin2SRModel", - "Swin2SRPreTrainedModel", - ] - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_swin2sr"] = ["Swin2SRImageProcessor"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_swin2sr import Swin2SRConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swin2sr import ( - Swin2SRForImageSuperResolution, - Swin2SRModel, - Swin2SRPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_swin2sr import Swin2SRImageProcessor - - + from .configuration_swin2sr import * + from .image_processing_swin2sr import * + from .modeling_swin2sr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swin2sr/configuration_swin2sr.py b/src/transformers/models/swin2sr/configuration_swin2sr.py index 0d910e89e4eb..a507d9d62513 100644 --- a/src/transformers/models/swin2sr/configuration_swin2sr.py +++ 
b/src/transformers/models/swin2sr/configuration_swin2sr.py @@ -149,3 +149,6 @@ def __init__( self.img_range = img_range self.resi_connection = resi_connection self.upsampler = upsampler + + +__all__ = ["Swin2SRConfig"] diff --git a/src/transformers/models/swin2sr/image_processing_swin2sr.py b/src/transformers/models/swin2sr/image_processing_swin2sr.py index f65842374320..b130b5fd8e36 100644 --- a/src/transformers/models/swin2sr/image_processing_swin2sr.py +++ b/src/transformers/models/swin2sr/image_processing_swin2sr.py @@ -31,11 +31,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class Swin2SRImageProcessor(BaseImageProcessor): r""" Constructs a Swin2SR image processor. @@ -201,3 +203,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["Swin2SRImageProcessor"] diff --git a/src/transformers/models/swin2sr/modeling_swin2sr.py b/src/transformers/models/swin2sr/modeling_swin2sr.py index b0a773c8af34..b7f90128cbba 100644 --- a/src/transformers/models/swin2sr/modeling_swin2sr.py +++ b/src/transformers/models/swin2sr/modeling_swin2sr.py @@ -1175,3 +1175,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["Swin2SRPreTrainedModel", "Swin2SRModel", "Swin2SRForImageSuperResolution"] diff --git a/src/transformers/models/swinv2/__init__.py b/src/transformers/models/swinv2/__init__.py index e3a13b79651f..f3939480f045 100644 --- a/src/transformers/models/swinv2/__init__.py +++ b/src/transformers/models/swinv2/__init__.py @@ -13,48 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_swinv2": ["Swinv2Config"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_swinv2"] = [ - "Swinv2ForImageClassification", - "Swinv2ForMaskedImageModeling", - "Swinv2Model", - "Swinv2PreTrainedModel", - "Swinv2Backbone", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_swinv2 import Swinv2Config - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_swinv2 import ( - Swinv2Backbone, - Swinv2ForImageClassification, - Swinv2ForMaskedImageModeling, - Swinv2Model, - Swinv2PreTrainedModel, - ) - - + from .configuration_swinv2 import * + from .modeling_swinv2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/swinv2/configuration_swinv2.py b/src/transformers/models/swinv2/configuration_swinv2.py index c6032c45df89..addb30e6a102 100644 --- a/src/transformers/models/swinv2/configuration_swinv2.py +++ b/src/transformers/models/swinv2/configuration_swinv2.py @@ -154,3 +154,6 @@ def __init__( # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + + +__all__ = ["Swinv2Config"] diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index 0c30e739a48f..9f2b7648de12 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -1458,3 +1458,12 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = [ + "Swinv2PreTrainedModel", + "Swinv2Model", + "Swinv2ForMaskedImageModeling", + "Swinv2ForImageClassification", + "Swinv2Backbone", +] diff --git a/src/transformers/models/switch_transformers/__init__.py b/src/transformers/models/switch_transformers/__init__.py index e6f9914fcbcc..de763cb4f913 100644 --- a/src/transformers/models/switch_transformers/__init__.py +++ b/src/transformers/models/switch_transformers/__init__.py @@ -11,66 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
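At runtime every rewritten __init__.py ends with the same epilogue: the module replaces itself in sys.modules with a _LazyModule built from define_import_structure(__file__), so no heavy submodule is imported until one of its attributes is actually touched. A stripped-down sketch of the lazy-module idea, not the transformers _LazyModule itself; backend checks, __dir__ support, and error handling are omitted:

# Conceptual stand-in; the real _LazyModule in transformers.utils does more.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {module: {public names}} into {public name: module}.
        self._name_to_module = {
            public: module for module, names in import_structure.items() for public in names
        }

    def __getattr__(self, attr: str):
        # Called only when normal attribute lookup fails, i.e. on first access.
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value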
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_switch_transformers": [ - "SwitchTransformersConfig", - "SwitchTransformersOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_switch_transformers"] = [ - "SwitchTransformersEncoderModel", - "SwitchTransformersForConditionalGeneration", - "SwitchTransformersModel", - "SwitchTransformersPreTrainedModel", - "SwitchTransformersTop1Router", - "SwitchTransformersSparseMLP", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_switch_transformers import ( - SwitchTransformersConfig, - SwitchTransformersOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_switch_transformers import ( - SwitchTransformersEncoderModel, - SwitchTransformersForConditionalGeneration, - SwitchTransformersModel, - SwitchTransformersPreTrainedModel, - SwitchTransformersSparseMLP, - SwitchTransformersTop1Router, - ) - - + from .configuration_switch_transformers import * + from .modeling_switch_transformers import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/switch_transformers/configuration_switch_transformers.py b/src/transformers/models/switch_transformers/configuration_switch_transformers.py index 5ed95f2b6138..093148f60104 100644 --- a/src/transformers/models/switch_transformers/configuration_switch_transformers.py +++ b/src/transformers/models/switch_transformers/configuration_switch_transformers.py @@ -180,3 +180,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + +__all__ = ["SwitchTransformersConfig"] diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py index c5797d4573b7..9f8e5e7de126 100644 --- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py +++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py @@ -1864,3 +1864,13 @@ def forward( ) return encoder_outputs + + +__all__ = [ + "SwitchTransformersPreTrainedModel", + "SwitchTransformersModel", + "SwitchTransformersForConditionalGeneration", + "SwitchTransformersEncoderModel", + "SwitchTransformersTop1Router", + "SwitchTransformersSparseMLP", +] diff --git a/src/transformers/models/t5/__init__.py b/src/transformers/models/t5/__init__.py index d6549e270abc..62730ae5c805 100644 --- a/src/transformers/models/t5/__init__.py +++ b/src/transformers/models/t5/__init__.py @@ -11,146 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_t5": ["T5Config", "T5OnnxConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_t5"] = ["T5Tokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_t5_fast"] = ["T5TokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_t5"] = [ - "T5EncoderModel", - "T5ForConditionalGeneration", - "T5Model", - "T5PreTrainedModel", - "load_tf_weights_in_t5", - "T5ForQuestionAnswering", - "T5ForSequenceClassification", - "T5ForTokenClassification", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_t5"] = [ - "TFT5EncoderModel", - "TFT5ForConditionalGeneration", - "TFT5Model", - "TFT5PreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_t5"] = [ - "FlaxT5EncoderModel", - "FlaxT5ForConditionalGeneration", - "FlaxT5Model", - "FlaxT5PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_t5 import T5Config, T5OnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_t5 import T5Tokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_t5_fast import T5TokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_t5 import ( - T5EncoderModel, - T5ForConditionalGeneration, - T5ForQuestionAnswering, - T5ForSequenceClassification, - T5ForTokenClassification, - T5Model, - T5PreTrainedModel, - load_tf_weights_in_t5, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_t5 import ( - TFT5EncoderModel, - TFT5ForConditionalGeneration, - TFT5Model, - TFT5PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_t5 import ( - FlaxT5EncoderModel, - FlaxT5ForConditionalGeneration, - FlaxT5Model, - FlaxT5PreTrainedModel, - ) - - + from .configuration_t5 import * + from .modeling_flax_t5 import * + from .modeling_t5 import * + from .modeling_tf_t5 import * + from .tokenization_t5 import * + from .tokenization_t5_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), 
module_spec=__spec__) diff --git a/src/transformers/models/t5/configuration_t5.py b/src/transformers/models/t5/configuration_t5.py index e5f2615611b8..9997c40f2369 100644 --- a/src/transformers/models/t5/configuration_t5.py +++ b/src/transformers/models/t5/configuration_t5.py @@ -161,3 +161,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def default_onnx_opset(self) -> int: return 13 + + +__all__ = ["T5Config", "T5OnnxConfig"] diff --git a/src/transformers/models/t5/modeling_flax_t5.py b/src/transformers/models/t5/modeling_flax_t5.py index be5ffd44897d..6459a443e223 100644 --- a/src/transformers/models/t5/modeling_flax_t5.py +++ b/src/transformers/models/t5/modeling_flax_t5.py @@ -1796,3 +1796,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): append_replace_return_docstrings( FlaxT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = ["FlaxT5PreTrainedModel", "FlaxT5Model", "FlaxT5EncoderModel", "FlaxT5ForConditionalGeneration"] diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index a90101924c5b..85a8d7d146c0 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -64,6 +64,8 @@ # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### + + def load_tf_weights_in_t5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: @@ -2376,3 +2378,15 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_t5", + "T5PreTrainedModel", + "T5Model", + "T5ForConditionalGeneration", + "T5EncoderModel", + "T5ForSequenceClassification", + "T5ForTokenClassification", + "T5ForQuestionAnswering", +] diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index 6cd44766bf88..f4f830936207 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -934,6 +934,8 @@ def build(self, input_shape=None): # Here you just need to specify a few (self-explanatory) # pointers for your model. #################################################### + + class TFT5PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained @@ -1678,3 +1680,6 @@ def build(self, input_shape=None): if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) + + +__all__ = ["TFT5PreTrainedModel", "TFT5Model", "TFT5ForConditionalGeneration", "TFT5EncoderModel", "TFT5MainLayer"] diff --git a/src/transformers/models/t5/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py index 1e166a78f10d..a3bce5b813c6 100644 --- a/src/transformers/models/t5/tokenization_t5.py +++ b/src/transformers/models/t5/tokenization_t5.py @@ -25,6 +25,7 @@ from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken +from ...utils.import_utils import export if TYPE_CHECKING: @@ -42,6 +43,7 @@ SPIECE_UNDERLINE = "▁" +@export(backends=("sentencepiece",)) class T5Tokenizer(PreTrainedTokenizer): """ Construct a T5 tokenizer. 
Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -445,3 +447,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["T5Tokenizer"] diff --git a/src/transformers/models/t5/tokenization_t5_fast.py b/src/transformers/models/t5/tokenization_t5_fast.py index 0a92803f1658..fdf967faed13 100644 --- a/src/transformers/models/t5/tokenization_t5_fast.py +++ b/src/transformers/models/t5/tokenization_t5_fast.py @@ -231,3 +231,6 @@ def get_sentinel_tokens(self): def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] + + +__all__ = ["T5TokenizerFast"] diff --git a/src/transformers/models/table_transformer/__init__.py b/src/transformers/models/table_transformer/__init__.py index de993193b0c5..4ca3a81616c1 100644 --- a/src/transformers/models/table_transformer/__init__.py +++ b/src/transformers/models/table_transformer/__init__.py @@ -11,51 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_table_transformer": [ - "TableTransformerConfig", - "TableTransformerOnnxConfig", - ] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_table_transformer"] = [ - "TableTransformerForObjectDetection", - "TableTransformerModel", - "TableTransformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_table_transformer import ( - TableTransformerConfig, - TableTransformerOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_table_transformer import ( - TableTransformerForObjectDetection, - TableTransformerModel, - TableTransformerPreTrainedModel, - ) - + from .configuration_table_transformer import * + from .modeling_table_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/table_transformer/configuration_table_transformer.py b/src/transformers/models/table_transformer/configuration_table_transformer.py index e0afa14154fc..458be0eea310 100644 --- a/src/transformers/models/table_transformer/configuration_table_transformer.py +++ b/src/transformers/models/table_transformer/configuration_table_transformer.py @@ -274,3 +274,6 @@ def atol_for_validation(self) -> float: @property def default_onnx_opset(self) -> int: return 12 + + +__all__ = ["TableTransformerConfig", "TableTransformerOnnxConfig"] diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index 38978e9adad8..a15f48318f2a 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ 
-1925,3 +1925,6 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask) + + +__all__ = ["TableTransformerPreTrainedModel", "TableTransformerModel", "TableTransformerForObjectDetection"] diff --git a/src/transformers/models/tapas/__init__.py b/src/transformers/models/tapas/__init__.py index 750bf7e00f5a..4d90dea5ec0f 100644 --- a/src/transformers/models/tapas/__init__.py +++ b/src/transformers/models/tapas/__init__.py @@ -11,81 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_tapas": ["TapasConfig"], - "tokenization_tapas": ["TapasTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tapas"] = [ - "TapasForMaskedLM", - "TapasForQuestionAnswering", - "TapasForSequenceClassification", - "TapasModel", - "TapasPreTrainedModel", - "load_tf_weights_in_tapas", - ] -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_tapas"] = [ - "TFTapasForMaskedLM", - "TFTapasForQuestionAnswering", - "TFTapasForSequenceClassification", - "TFTapasModel", - "TFTapasPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_tapas import TapasConfig - from .tokenization_tapas import TapasTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tapas import ( - TapasForMaskedLM, - TapasForQuestionAnswering, - TapasForSequenceClassification, - TapasModel, - TapasPreTrainedModel, - load_tf_weights_in_tapas, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_tapas import ( - TFTapasForMaskedLM, - TFTapasForQuestionAnswering, - TFTapasForSequenceClassification, - TFTapasModel, - TFTapasPreTrainedModel, - ) - - + from .configuration_tapas import * + from .modeling_tapas import * + from .modeling_tf_tapas import * + from .tokenization_tapas import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/tapas/configuration_tapas.py b/src/transformers/models/tapas/configuration_tapas.py index 63d289e38fed..58769e99a722 100644 --- a/src/transformers/models/tapas/configuration_tapas.py +++ b/src/transformers/models/tapas/configuration_tapas.py @@ -224,3 +224,6 @@ def __init__( if isinstance(self.aggregation_labels, dict): self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()} + + +__all__ = ["TapasConfig"] diff --git a/src/transformers/models/tapas/modeling_tapas.py b/src/transformers/models/tapas/modeling_tapas.py index b74a27ae5ce5..7ae956906c1b 100644 --- 
a/src/transformers/models/tapas/modeling_tapas.py +++ b/src/transformers/models/tapas/modeling_tapas.py @@ -2387,3 +2387,13 @@ def _calculate_regression_loss( per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask + + +__all__ = [ + "load_tf_weights_in_tapas", + "TapasPreTrainedModel", + "TapasModel", + "TapasForMaskedLM", + "TapasForQuestionAnswering", + "TapasForSequenceClassification", +] diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index afb1c3cbda8b..9bcbea9cd717 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -2451,3 +2451,13 @@ def _calculate_regression_loss( ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask + + +__all__ = [ + "TFTapasPreTrainedModel", + "TFTapasModel", + "TFTapasForMaskedLM", + "TFTapasForQuestionAnswering", + "TFTapasForSequenceClassification", + "TFTapasMainLayer", +] diff --git a/src/transformers/models/tapas/tokenization_tapas.py b/src/transformers/models/tapas/tokenization_tapas.py index 2da9fe40c1ce..b9fc65184763 100644 --- a/src/transformers/models/tapas/tokenization_tapas.py +++ b/src/transformers/models/tapas/tokenization_tapas.py @@ -2761,3 +2761,6 @@ def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=N table.iloc[row_index, col_index].numeric_value = numeric_value return table + + +__all__ = ["TapasTokenizer"] diff --git a/src/transformers/models/time_series_transformer/__init__.py b/src/transformers/models/time_series_transformer/__init__.py index 39879ed1bc00..928200c74186 100644 --- a/src/transformers/models/time_series_transformer/__init__.py +++ b/src/transformers/models/time_series_transformer/__init__.py @@ -13,44 +13,15 @@ # limitations under the License. 
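From here on, the trailing __all__ of each submodule is the single source of truth for what its package re-exports; the TYPE_CHECKING star imports only pick up names listed there, so private helpers stay private. A minimal sketch of that Python mechanic (toy module, not from this patch):

    # toy_module.py
    __all__ = ["PublicThing"]      # only this name survives `from toy_module import *`

    class PublicThing: ...
    class _PrivateHelper: ...      # omitted from star imports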
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_time_series_transformer": ["TimeSeriesTransformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_time_series_transformer"] = [ - "TimeSeriesTransformerForPrediction", - "TimeSeriesTransformerModel", - "TimeSeriesTransformerPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_time_series_transformer import ( - TimeSeriesTransformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_time_series_transformer import ( - TimeSeriesTransformerForPrediction, - TimeSeriesTransformerModel, - TimeSeriesTransformerPreTrainedModel, - ) - + from .configuration_time_series_transformer import * + from .modeling_time_series_transformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py index 56b06c038412..bc063774389c 100644 --- a/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/configuration_time_series_transformer.py @@ -224,3 +224,6 @@ def _number_of_features(self) -> int: + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features ) + + +__all__ = ["TimeSeriesTransformerConfig"] diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index b45e6d7e850d..77e9c91fb53c 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -1779,3 +1779,6 @@ def generate( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) ) + + +__all__ = ["TimeSeriesTransformerPreTrainedModel", "TimeSeriesTransformerModel", "TimeSeriesTransformerForPrediction"] diff --git a/src/transformers/models/timesformer/__init__.py b/src/transformers/models/timesformer/__init__.py index 48a2aa9fa474..7345be9af8d0 100644 --- a/src/transformers/models/timesformer/__init__.py +++ b/src/transformers/models/timesformer/__init__.py @@ -13,41 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_timesformer": ["TimesformerConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_timesformer"] = [ - "TimesformerModel", - "TimesformerForVideoClassification", - "TimesformerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_timesformer import TimesformerConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_timesformer import ( - TimesformerForVideoClassification, - TimesformerModel, - TimesformerPreTrainedModel, - ) - + from .configuration_timesformer import * + from .modeling_timesformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/timesformer/configuration_timesformer.py b/src/transformers/models/timesformer/configuration_timesformer.py index 2ee7125de255..edb69af230f9 100644 --- a/src/transformers/models/timesformer/configuration_timesformer.py +++ b/src/transformers/models/timesformer/configuration_timesformer.py @@ -124,3 +124,6 @@ def __init__( self.attention_type = attention_type self.drop_path_rate = drop_path_rate + + +__all__ = ["TimesformerConfig"] diff --git a/src/transformers/models/timesformer/modeling_timesformer.py b/src/transformers/models/timesformer/modeling_timesformer.py index 2262898d5474..638ec33c0660 100644 --- a/src/transformers/models/timesformer/modeling_timesformer.py +++ b/src/transformers/models/timesformer/modeling_timesformer.py @@ -811,3 +811,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["TimesformerPreTrainedModel", "TimesformerModel", "TimesformerForVideoClassification"] diff --git a/src/transformers/models/timm_backbone/__init__.py b/src/transformers/models/timm_backbone/__init__.py index 4c692f76432f..b3a72a64d9a8 100644 --- a/src/transformers/models/timm_backbone/__init__.py +++ b/src/transformers/models/timm_backbone/__init__.py @@ -1,49 +1,17 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_timm_backbone"] = ["TimmBackbone"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_timm_backbone import TimmBackboneConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_timm_backbone import TimmBackbone - + from .configuration_timm_backbone import * + from .modeling_timm_backbone import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/timm_backbone/configuration_timm_backbone.py b/src/transformers/models/timm_backbone/configuration_timm_backbone.py index dd8893820c3b..6000698c9248 100644 --- a/src/transformers/models/timm_backbone/configuration_timm_backbone.py +++ b/src/transformers/models/timm_backbone/configuration_timm_backbone.py @@ -81,3 +81,6 @@ def __init__( self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else [-1] self.freeze_batch_norm_2d = freeze_batch_norm_2d + + +__all__ = ["TimmBackboneConfig"] diff --git a/src/transformers/models/timm_backbone/modeling_timm_backbone.py b/src/transformers/models/timm_backbone/modeling_timm_backbone.py index ffe83daf7bc2..a097446200b7 100644 --- a/src/transformers/models/timm_backbone/modeling_timm_backbone.py +++ b/src/transformers/models/timm_backbone/modeling_timm_backbone.py @@ -161,3 +161,6 @@ def forward( return output return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None) + + +__all__ = ["TimmBackbone"] diff --git a/src/transformers/models/trocr/__init__.py b/src/transformers/models/trocr/__init__.py index 14854857586d..3c54fe1c5480 100644 --- a/src/transformers/models/trocr/__init__.py +++ b/src/transformers/models/trocr/__init__.py @@ -13,46 +13,16 @@ # limitations under the License.
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_speech_available, - is_torch_available, -) - - -_import_structure = { - "configuration_trocr": ["TrOCRConfig"], - "processing_trocr": ["TrOCRProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_trocr"] = [ - "TrOCRForCausalLM", - "TrOCRPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_trocr import TrOCRConfig - from .processing_trocr import TrOCRProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_trocr import TrOCRForCausalLM, TrOCRPreTrainedModel - + from .configuration_trocr import * + from .modeling_trocr import * + from .processing_trocr import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index f47412e93a50..6c3aabbe1958 100644 --- a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -141,3 +141,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["TrOCRConfig"] diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index 04eb40ab2a2f..6dc611fa81ef 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -978,3 +978,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["TrOCRPreTrainedModel", "TrOCRForCausalLM"] diff --git a/src/transformers/models/trocr/processing_trocr.py b/src/transformers/models/trocr/processing_trocr.py index b0d2e823fe68..21def8ab532c 100644 --- a/src/transformers/models/trocr/processing_trocr.py +++ b/src/transformers/models/trocr/processing_trocr.py @@ -139,3 +139,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["TrOCRProcessor"] diff --git a/src/transformers/models/tvp/__init__.py b/src/transformers/models/tvp/__init__.py index b8479dbdd331..1470acbe42b1 100644 --- a/src/transformers/models/tvp/__init__.py +++ b/src/transformers/models/tvp/__init__.py @@ -14,61 +14,17 @@ # limitations under the License. 
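The tvp hunks below show the other half of the mechanism: classes that need an optional backend are tagged with the new export decorator, as the sentencepiece tokenizers were earlier. Usage mirrors what appears in this patch; the class here is a made-up stand-in, and the grouping-by-backend behavior is inferred rather than shown:

    from transformers.utils.import_utils import export

    @export(backends=("vision",))   # records that this symbol needs the vision backend
    class MyImageProcessor:         # hypothetical class, for illustration only
        pass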
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_tvp": ["TvpConfig"], - "processing_tvp": ["TvpProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_tvp"] = ["TvpImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tvp"] = [ - "TvpModel", - "TvpPreTrainedModel", - "TvpForVideoGrounding", - ] - if TYPE_CHECKING: - from .configuration_tvp import ( - TvpConfig, - ) - from .processing_tvp import TvpProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_tvp import TvpImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tvp import ( - TvpForVideoGrounding, - TvpModel, - TvpPreTrainedModel, - ) - + from .configuration_tvp import * + from .image_processing_tvp import * + from .modeling_tvp import * + from .processing_tvp import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/tvp/configuration_tvp.py b/src/transformers/models/tvp/configuration_tvp.py index 2941c4fcbe13..803acb220d08 100644 --- a/src/transformers/models/tvp/configuration_tvp.py +++ b/src/transformers/models/tvp/configuration_tvp.py @@ -196,3 +196,6 @@ def to_dict(self): output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +__all__ = ["TvpConfig"] diff --git a/src/transformers/models/tvp/image_processing_tvp.py b/src/transformers/models/tvp/image_processing_tvp.py index 100ec133e8b0..feba14c1795d 100644 --- a/src/transformers/models/tvp/image_processing_tvp.py +++ b/src/transformers/models/tvp/image_processing_tvp.py @@ -39,6 +39,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -81,6 +82,7 @@ def get_resize_output_image_size( return size +@export(backends=("vision",)) class TvpImageProcessor(BaseImageProcessor): r""" Constructs a Tvp image processor. 
@@ -476,3 +478,6 @@ def preprocess( data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["TvpImageProcessor"] diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index ec00eee92861..04bcf12a83b7 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -980,3 +980,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["TvpPreTrainedModel", "TvpModel", "TvpForVideoGrounding"] diff --git a/src/transformers/models/tvp/processing_tvp.py b/src/transformers/models/tvp/processing_tvp.py index eb8aabfdade3..a4ed81e54aad 100644 --- a/src/transformers/models/tvp/processing_tvp.py +++ b/src/transformers/models/tvp/processing_tvp.py @@ -151,3 +151,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["TvpProcessor"] diff --git a/src/transformers/models/udop/__init__.py b/src/transformers/models/udop/__init__.py index 732d97aa7a99..cf4c36f6363f 100644 --- a/src/transformers/models/udop/__init__.py +++ b/src/transformers/models/udop/__init__.py @@ -11,86 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_udop": ["UdopConfig"], - "processing_udop": ["UdopProcessor"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_udop"] = ["UdopTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_udop_fast"] = ["UdopTokenizerFast"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_udop"] = [ - "UdopForConditionalGeneration", - "UdopPreTrainedModel", - "UdopModel", - "UdopEncoderModel", - ] if TYPE_CHECKING: - from .configuration_udop import UdopConfig - from .processing_udop import UdopProcessor - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_udop import UdopTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_udop_fast import UdopTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_udop import ( - UdopEncoderModel, - UdopForConditionalGeneration, - UdopModel, - UdopPreTrainedModel, - ) - + from .configuration_udop import * + from .modeling_udop import * + from .processing_udop import * + from .tokenization_udop import * + from .tokenization_udop_fast 
import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/udop/configuration_udop.py b/src/transformers/models/udop/configuration_udop.py index 5ae8bcebfd79..6ed28c78c8fb 100644 --- a/src/transformers/models/udop/configuration_udop.py +++ b/src/transformers/models/udop/configuration_udop.py @@ -155,3 +155,6 @@ def __init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) + + +__all__ = ["UdopConfig"] diff --git a/src/transformers/models/udop/modeling_udop.py b/src/transformers/models/udop/modeling_udop.py index 972248daaae5..16eda685df5e 100644 --- a/src/transformers/models/udop/modeling_udop.py +++ b/src/transformers/models/udop/modeling_udop.py @@ -2039,3 +2039,6 @@ def forward( ) return encoder_outputs + + +__all__ = ["UdopPreTrainedModel", "UdopModel", "UdopForConditionalGeneration", "UdopEncoderModel"] diff --git a/src/transformers/models/udop/processing_udop.py b/src/transformers/models/udop/processing_udop.py index 2902541d6f5b..9647a67dfd82 100644 --- a/src/transformers/models/udop/processing_udop.py +++ b/src/transformers/models/udop/processing_udop.py @@ -202,3 +202,6 @@ def decode(self, *args, **kwargs): # Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.model_input_names def model_input_names(self): return ["input_ids", "bbox", "attention_mask", "pixel_values"] + + +__all__ = ["UdopProcessor"] diff --git a/src/transformers/models/udop/tokenization_udop.py b/src/transformers/models/udop/tokenization_udop.py index 4be979981916..d105dda809b9 100644 --- a/src/transformers/models/udop/tokenization_udop.py +++ b/src/transformers/models/udop/tokenization_udop.py @@ -33,6 +33,7 @@ TruncationStrategy, ) from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -147,6 +148,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} +@export(backends=("sentencepiece",)) class UdopTokenizer(PreTrainedTokenizer): """ Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on @@ -1462,3 +1464,6 @@ def _pad( raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs + + +__all__ = ["UdopTokenizer"] diff --git a/src/transformers/models/udop/tokenization_udop_fast.py b/src/transformers/models/udop/tokenization_udop_fast.py index 8340c4af4e2b..03bada6f6047 100644 --- a/src/transformers/models/udop/tokenization_udop_fast.py +++ b/src/transformers/models/udop/tokenization_udop_fast.py @@ -1010,3 +1010,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["UdopTokenizerFast"] diff --git a/src/transformers/models/umt5/__init__.py b/src/transformers/models/umt5/__init__.py index e68ae4cb3737..8707d2656397 100644 --- a/src/transformers/models/umt5/__init__.py +++ b/src/transformers/models/umt5/__init__.py @@ -11,50 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_umt5": ["UMT5Config", "UMT5OnnxConfig"]} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_umt5"] = [ - "UMT5EncoderModel", - "UMT5ForConditionalGeneration", - "UMT5ForQuestionAnswering", - "UMT5ForSequenceClassification", - "UMT5ForTokenClassification", - "UMT5Model", - "UMT5PreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_umt5 import UMT5Config, UMT5OnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_umt5 import ( - UMT5EncoderModel, - UMT5ForConditionalGeneration, - UMT5ForQuestionAnswering, - UMT5ForSequenceClassification, - UMT5ForTokenClassification, - UMT5Model, - UMT5PreTrainedModel, - ) + from .configuration_umt5 import * + from .modeling_umt5 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/umt5/configuration_umt5.py b/src/transformers/models/umt5/configuration_umt5.py index d7323d759fd0..b6112db02f82 100644 --- a/src/transformers/models/umt5/configuration_umt5.py +++ b/src/transformers/models/umt5/configuration_umt5.py @@ -171,3 +171,6 @@ def default_onnx_opset(self) -> int: @property def atol_for_validation(self) -> float: return 5e-4 + + +__all__ = ["UMT5Config", "UMT5OnnxConfig"] diff --git a/src/transformers/models/umt5/modeling_umt5.py b/src/transformers/models/umt5/modeling_umt5.py index 3271689540b9..3368c1885710 100644 --- a/src/transformers/models/umt5/modeling_umt5.py +++ b/src/transformers/models/umt5/modeling_umt5.py @@ -1855,3 +1855,14 @@ def forward( encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "UMT5PreTrainedModel", + "UMT5Model", + "UMT5ForConditionalGeneration", + "UMT5EncoderModel", + "UMT5ForSequenceClassification", + "UMT5ForTokenClassification", + "UMT5ForQuestionAnswering", +] diff --git a/src/transformers/models/unispeech/__init__.py b/src/transformers/models/unispeech/__init__.py index 91db9ada5ef2..19590cb1b5c6 100644 --- a/src/transformers/models/unispeech/__init__.py +++ b/src/transformers/models/unispeech/__init__.py @@ -13,49 +13,15 @@ # limitations under the License. 
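Worth noting for unispeech below: the old __init__ imported is_flax_available and is_tf_available without ever using them, dead weight the generated structure cannot accumulate. When a backend genuinely is missing, the failure presumably moves to first attribute access; a hedged sketch of how that would look (error text assumed, not taken from this patch):

    try:
        from transformers.models.unispeech import UniSpeechModel  # torch-backed
    except ImportError as err:
        # With a lazy structure, the missing backend is reported when the name
        # is first resolved, rather than the name silently vanishing.
        print(f"unispeech requires torch: {err}")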
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_unispeech": ["UniSpeechConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_unispeech"] = [ - "UniSpeechForCTC", - "UniSpeechForPreTraining", - "UniSpeechForSequenceClassification", - "UniSpeechModel", - "UniSpeechPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_unispeech import UniSpeechConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_unispeech import ( - UniSpeechForCTC, - UniSpeechForPreTraining, - UniSpeechForSequenceClassification, - UniSpeechModel, - UniSpeechPreTrainedModel, - ) - + from .configuration_unispeech import * + from .modeling_unispeech import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/unispeech/configuration_unispeech.py b/src/transformers/models/unispeech/configuration_unispeech.py index 69bc162220d9..ccfe52f79c1c 100644 --- a/src/transformers/models/unispeech/configuration_unispeech.py +++ b/src/transformers/models/unispeech/configuration_unispeech.py @@ -304,3 +304,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["UniSpeechConfig"] diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index 4202f680437c..7ff8eadbf763 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -1896,3 +1896,12 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "UniSpeechPreTrainedModel", + "UniSpeechModel", + "UniSpeechForPreTraining", + "UniSpeechForCTC", + "UniSpeechForSequenceClassification", +] diff --git a/src/transformers/models/unispeech_sat/__init__.py b/src/transformers/models/unispeech_sat/__init__.py index 275f98ac2220..192681b0126b 100644 --- a/src/transformers/models/unispeech_sat/__init__.py +++ b/src/transformers/models/unispeech_sat/__init__.py @@ -13,55 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_unispeech_sat": ["UniSpeechSatConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_unispeech_sat"] = [ - "UniSpeechSatForAudioFrameClassification", - "UniSpeechSatForCTC", - "UniSpeechSatForPreTraining", - "UniSpeechSatForSequenceClassification", - "UniSpeechSatForXVector", - "UniSpeechSatModel", - "UniSpeechSatPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_unispeech_sat import UniSpeechSatConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_unispeech_sat import ( - UniSpeechSatForAudioFrameClassification, - UniSpeechSatForCTC, - UniSpeechSatForPreTraining, - UniSpeechSatForSequenceClassification, - UniSpeechSatForXVector, - UniSpeechSatModel, - UniSpeechSatPreTrainedModel, - ) - + from .configuration_unispeech_sat import * + from .modeling_unispeech_sat import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py index 85661b02b686..a33403306e8a 100644 --- a/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/configuration_unispeech_sat.py @@ -322,3 +322,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["UniSpeechSatConfig"] diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index bfb2cbfa4f55..dd3f6976c852 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -2228,3 +2228,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "UniSpeechSatPreTrainedModel", + "UniSpeechSatModel", + "UniSpeechSatForPreTraining", + "UniSpeechSatForCTC", + "UniSpeechSatForSequenceClassification", + "UniSpeechSatForAudioFrameClassification", + "UniSpeechSatForXVector", +] diff --git a/src/transformers/models/univnet/__init__.py b/src/transformers/models/univnet/__init__.py index ea9babc3314f..a8642d30c167 100644 --- a/src/transformers/models/univnet/__init__.py +++ b/src/transformers/models/univnet/__init__.py @@ -11,49 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) - - -_import_structure = { - "configuration_univnet": ["UnivNetConfig"], - "feature_extraction_univnet": ["UnivNetFeatureExtractor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_univnet"] = [ - "UnivNetModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_univnet import ( - UnivNetConfig, - ) - from .feature_extraction_univnet import UnivNetFeatureExtractor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_univnet import ( - UnivNetModel, - ) - + from .configuration_univnet import * + from .feature_extraction_univnet import * + from .modeling_univnet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/univnet/configuration_univnet.py b/src/transformers/models/univnet/configuration_univnet.py index 0f4dceb47948..0a3811ee3a26 100644 --- a/src/transformers/models/univnet/configuration_univnet.py +++ b/src/transformers/models/univnet/configuration_univnet.py @@ -120,3 +120,6 @@ def __init__( self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope super().__init__(**kwargs) + + +__all__ = ["UnivNetConfig"] diff --git a/src/transformers/models/univnet/feature_extraction_univnet.py b/src/transformers/models/univnet/feature_extraction_univnet.py index 067aacc3d8c8..ab9d3ed5dd7f 100644 --- a/src/transformers/models/univnet/feature_extraction_univnet.py +++ b/src/transformers/models/univnet/feature_extraction_univnet.py @@ -454,3 +454,6 @@ def to_dict(self) -> Dict[str, Any]: del output[name] return output + + +__all__ = ["UnivNetFeatureExtractor"] diff --git a/src/transformers/models/univnet/modeling_univnet.py b/src/transformers/models/univnet/modeling_univnet.py index 5b0c659c302a..f66758b96f84 100644 --- a/src/transformers/models/univnet/modeling_univnet.py +++ b/src/transformers/models/univnet/modeling_univnet.py @@ -629,3 +629,6 @@ def remove_weight_norm(self): for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) + + +__all__ = ["UnivNetModel"] diff --git a/src/transformers/models/upernet/__init__.py b/src/transformers/models/upernet/__init__.py index 3954fe4594da..f798fe556197 100644 --- a/src/transformers/models/upernet/__init__.py +++ b/src/transformers/models/upernet/__init__.py @@ -13,38 +13,15 @@ # limitations under the License. 
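Each conversion hand-copies names from the old _import_structure into __all__, so a quick parity check is cheap insurance. The script below is a hypothetical reviewer aid, not part of this patch, and it assumes the lazy module advertises its exports through dir(), as _LazyModule does today:

    import importlib

    EXPECTED = {"UperNetConfig", "UperNetPreTrainedModel", "UperNetForSemanticSegmentation"}

    mod = importlib.import_module("transformers.models.upernet")
    missing = EXPECTED - set(dir(mod))
    assert not missing, f"dropped exports: {missing}"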
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_upernet": ["UperNetConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_upernet"] = [ - "UperNetForSemanticSegmentation", - "UperNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_upernet import UperNetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel - - + from .configuration_upernet import * + from .modeling_upernet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/upernet/configuration_upernet.py b/src/transformers/models/upernet/configuration_upernet.py index 3e17fd4289d8..c235c17d9cb6 100644 --- a/src/transformers/models/upernet/configuration_upernet.py +++ b/src/transformers/models/upernet/configuration_upernet.py @@ -135,3 +135,6 @@ def __init__( self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index + + +__all__ = ["UperNetConfig"] diff --git a/src/transformers/models/upernet/modeling_upernet.py b/src/transformers/models/upernet/modeling_upernet.py index 9721cdcb4b0e..ef5181106978 100644 --- a/src/transformers/models/upernet/modeling_upernet.py +++ b/src/transformers/models/upernet/modeling_upernet.py @@ -438,3 +438,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["UperNetPreTrainedModel", "UperNetForSemanticSegmentation"] diff --git a/src/transformers/models/video_llava/__init__.py b/src/transformers/models/video_llava/__init__.py index d1f4beabc979..bb5f5971a167 100644 --- a/src/transformers/models/video_llava/__init__.py +++ b/src/transformers/models/video_llava/__init__.py @@ -13,59 +13,17 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_video_llava": ["VideoLlavaConfig"], - "processing_video_llava": ["VideoLlavaProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_video_llava"] = ["VideoLlavaImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_video_llava"] = [ - "VideoLlavaPreTrainedModel", - "VideoLlavaForConditionalGeneration", - ] - if TYPE_CHECKING: - from .configuration_video_llava import ( - VideoLlavaConfig, - ) - from .image_processing_video_llava import VideoLlavaProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_video_llava import VideoLlavaImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_video_llava import ( - VideoLlavaForConditionalGeneration, - VideoLlavaPreTrainedModel, - ) - + from .configuration_video_llava import * + from .image_processing_video_llava import * + from .modeling_video_llava import * + from .processing_video_llava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/video_llava/configuration_video_llava.py b/src/transformers/models/video_llava/configuration_video_llava.py index 8738a02585e0..00f9ab07bac2 100644 --- a/src/transformers/models/video_llava/configuration_video_llava.py +++ b/src/transformers/models/video_llava/configuration_video_llava.py @@ -132,3 +132,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["VideoLlavaConfig"] diff --git a/src/transformers/models/video_llava/image_processing_video_llava.py b/src/transformers/models/video_llava/image_processing_video_llava.py index 3e77110c7d45..a4be121cdc0f 100644 --- a/src/transformers/models/video_llava/image_processing_video_llava.py +++ b/src/transformers/models/video_llava/image_processing_video_llava.py @@ -41,6 +41,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -66,6 +67,7 @@ def make_batched_videos(videos) -> List[VideoInput]: raise ValueError(f"Could not make batched video from {videos}") +@export(backends=("vision",)) class VideoLlavaImageProcessor(BaseImageProcessor): r""" Constructs a CLIP image processor. 
@@ -402,3 +404,6 @@ def _preprocess_image( image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image + + +__all__ = ["VideoLlavaImageProcessor"] diff --git a/src/transformers/models/video_llava/modeling_video_llava.py b/src/transformers/models/video_llava/modeling_video_llava.py index b9263ad15cbf..e7eba1a69798 100644 --- a/src/transformers/models/video_llava/modeling_video_llava.py +++ b/src/transformers/models/video_llava/modeling_video_llava.py @@ -705,3 +705,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values_videos"] = pixel_values_videos return model_inputs + + +__all__ = ["VideoLlavaPreTrainedModel", "VideoLlavaForConditionalGeneration"] diff --git a/src/transformers/models/video_llava/processing_video_llava.py b/src/transformers/models/video_llava/processing_video_llava.py index a06913d7acf7..69298bf7253f 100644 --- a/src/transformers/models/video_llava/processing_video_llava.py +++ b/src/transformers/models/video_llava/processing_video_llava.py @@ -207,3 +207,6 @@ def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + +__all__ = ["VideoLlavaProcessor"] diff --git a/src/transformers/models/videomae/__init__.py b/src/transformers/models/videomae/__init__.py index 0e52081adbca..e8ff2440bb6c 100644 --- a/src/transformers/models/videomae/__init__.py +++ b/src/transformers/models/videomae/__init__.py @@ -13,61 +13,17 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_videomae": ["VideoMAEConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_videomae"] = [ - "VideoMAEForPreTraining", - "VideoMAEModel", - "VideoMAEPreTrainedModel", - "VideoMAEForVideoClassification", - ] - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"] - _import_structure["image_processing_videomae"] = ["VideoMAEImageProcessor"] - if TYPE_CHECKING: - from .configuration_videomae import VideoMAEConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_videomae import ( - VideoMAEForPreTraining, - VideoMAEForVideoClassification, - VideoMAEModel, - VideoMAEPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_videomae import VideoMAEFeatureExtractor - from .image_processing_videomae import VideoMAEImageProcessor - + from .configuration_videomae import * + from .feature_extraction_videomae import * + from .image_processing_videomae import * + from .modeling_videomae import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/videomae/configuration_videomae.py b/src/transformers/models/videomae/configuration_videomae.py index a6150f343350..3940b6f01003 100644 --- a/src/transformers/models/videomae/configuration_videomae.py +++ b/src/transformers/models/videomae/configuration_videomae.py @@ -143,3 +143,6 @@ def __init__( self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.norm_pix_loss = norm_pix_loss + + +__all__ = ["VideoMAEConfig"] diff --git a/src/transformers/models/videomae/feature_extraction_videomae.py b/src/transformers/models/videomae/feature_extraction_videomae.py index 4a90d10c9c55..fa6e60e1b13e 100644 --- a/src/transformers/models/videomae/feature_extraction_videomae.py +++ b/src/transformers/models/videomae/feature_extraction_videomae.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_videomae import VideoMAEImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class VideoMAEFeatureExtractor(VideoMAEImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["VideoMAEFeatureExtractor"] diff --git a/src/transformers/models/videomae/image_processing_videomae.py b/src/transformers/models/videomae/image_processing_videomae.py index 413589523aa6..03b9b000fca9 100644 --- a/src/transformers/models/videomae/image_processing_videomae.py +++ b/src/transformers/models/videomae/image_processing_videomae.py @@ -38,6 +38,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -60,6 +61,7 @@ def make_batched(videos) -> List[List[ImageInput]]: raise ValueError(f"Could not make batched video from {videos}") +@export(backends=("vision",)) class VideoMAEImageProcessor(BaseImageProcessor): r""" Constructs a VideoMAE image processor. @@ -343,3 +345,6 @@ def preprocess( data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["VideoMAEImageProcessor"] diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py index 73a680ba3a72..a623c4d8b2dd 100755 --- a/src/transformers/models/videomae/modeling_videomae.py +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -1133,3 +1133,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["VideoMAEPreTrainedModel", "VideoMAEModel", "VideoMAEForPreTraining", "VideoMAEForVideoClassification"] diff --git a/src/transformers/models/vilt/__init__.py b/src/transformers/models/vilt/__init__.py index 6fcfd64c8beb..bfda9f9725c2 100644 --- a/src/transformers/models/vilt/__init__.py +++ b/src/transformers/models/vilt/__init__.py @@ -13,71 +13,18 @@ # limitations under the License. 
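One small behavioral fix rides along in vilt below: its old __init__ was the rare one that built _LazyModule without module_spec, and the refactor adds module_spec=__spec__ like everywhere else. That keeps the replaced module introspectable; a tiny check (illustrative, and assuming _LazyModule stores the spec it is given, which matches its current constructor):

    import transformers.models.vilt as vilt

    assert vilt.__spec__ is not None   # the spec survives the sys.modules swap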
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_vilt": ["ViltConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_vilt"] = ["ViltFeatureExtractor"] - _import_structure["image_processing_vilt"] = ["ViltImageProcessor"] - _import_structure["processing_vilt"] = ["ViltProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vilt"] = [ - "ViltForImageAndTextRetrieval", - "ViltForImagesAndTextClassification", - "ViltForTokenClassification", - "ViltForMaskedLM", - "ViltForQuestionAnswering", - "ViltLayer", - "ViltModel", - "ViltPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vilt import ViltConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_vilt import ViltFeatureExtractor - from .image_processing_vilt import ViltImageProcessor - from .processing_vilt import ViltProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vilt import ( - ViltForImageAndTextRetrieval, - ViltForImagesAndTextClassification, - ViltForMaskedLM, - ViltForQuestionAnswering, - ViltForTokenClassification, - ViltLayer, - ViltModel, - ViltPreTrainedModel, - ) - - + from .configuration_vilt import * + from .feature_extraction_vilt import * + from .image_processing_vilt import * + from .modeling_vilt import * + from .processing_vilt import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vilt/configuration_vilt.py b/src/transformers/models/vilt/configuration_vilt.py index a57b40068207..cc6d727f5985 100644 --- a/src/transformers/models/vilt/configuration_vilt.py +++ b/src/transformers/models/vilt/configuration_vilt.py @@ -142,3 +142,6 @@ def __init__( self.qkv_bias = qkv_bias self.max_image_length = max_image_length self.num_images = num_images + + +__all__ = ["ViltConfig"] diff --git a/src/transformers/models/vilt/feature_extraction_vilt.py b/src/transformers/models/vilt/feature_extraction_vilt.py index 5091946bf943..c3d8215002c4 100644 --- a/src/transformers/models/vilt/feature_extraction_vilt.py +++ b/src/transformers/models/vilt/feature_extraction_vilt.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_vilt import ViltImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ViltFeatureExtractor(ViltImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ViltFeatureExtractor"] diff --git a/src/transformers/models/vilt/image_processing_vilt.py b/src/transformers/models/vilt/image_processing_vilt.py index 
66ffeb816fec..22b99af2d525 100644 --- a/src/transformers/models/vilt/image_processing_vilt.py +++ b/src/transformers/models/vilt/image_processing_vilt.py @@ -35,6 +35,7 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging +from ...utils.import_utils import export if is_vision_available(): @@ -118,6 +119,7 @@ def get_resize_output_image_size( return new_height, new_width +@export(backends=("vision",)) class ViltImageProcessor(BaseImageProcessor): r""" Constructs a ViLT image processor. @@ -484,3 +486,6 @@ def preprocess( encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) return encoded_outputs + + +__all__ = ["ViltImageProcessor"] diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index f79606b78966..bcde1870c14e 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -1485,3 +1485,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "ViltPreTrainedModel", + "ViltModel", + "ViltForMaskedLM", + "ViltForQuestionAnswering", + "ViltForImageAndTextRetrieval", + "ViltForImagesAndTextClassification", + "ViltForTokenClassification", +] diff --git a/src/transformers/models/vilt/processing_vilt.py b/src/transformers/models/vilt/processing_vilt.py index 0ccb884ea00c..0712551d708c 100644 --- a/src/transformers/models/vilt/processing_vilt.py +++ b/src/transformers/models/vilt/processing_vilt.py @@ -22,8 +22,10 @@ from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType +from ...utils.import_utils import export +@export(backends=("vision",)) class ViltProcessor(ProcessorMixin): r""" Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor. @@ -146,3 +148,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["ViltProcessor"] diff --git a/src/transformers/models/vipllava/__init__.py b/src/transformers/models/vipllava/__init__.py index edc2a5106ba7..408608b7d1a1 100644 --- a/src/transformers/models/vipllava/__init__.py +++ b/src/transformers/models/vipllava/__init__.py @@ -13,40 +13,15 @@ # limitations under the License. 
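# A sketch of how a backend-gated class opts in under the new scheme, replacing
# the old try/except OptionalDependencyNotAvailable blocks. The processor class
# below is hypothetical; only the `export` decorator and the `__all__` convention
# are taken from the patch. `export` attaches an explicit backend requirement
# that define_import_structure picks up when it scans the file.
from ...image_processing_utils import BaseImageProcessor
from ...utils.import_utils import export


@export(backends=("vision",))
class MyImageProcessor(BaseImageProcessor):
    """Hypothetical vision-only processor; importable once the vision backend is present."""


__all__ = ["MyImageProcessor"]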
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_vipllava": ["VipLlavaConfig"]} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vipllava"] = [ - "VipLlavaForConditionalGeneration", - "VipLlavaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vipllava import VipLlavaConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vipllava import ( - VipLlavaForConditionalGeneration, - VipLlavaPreTrainedModel, - ) - - + from .configuration_vipllava import * + from .modeling_vipllava import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vipllava/configuration_vipllava.py b/src/transformers/models/vipllava/configuration_vipllava.py index f88be5adfba0..e5811778bd87 100644 --- a/src/transformers/models/vipllava/configuration_vipllava.py +++ b/src/transformers/models/vipllava/configuration_vipllava.py @@ -120,3 +120,6 @@ def __init__( self.text_config = text_config super().__init__(**kwargs) + + +__all__ = ["VipLlavaConfig"] diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index e036d6fb7667..7ac38096e52e 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -21,9 +21,9 @@ import torch.utils.checkpoint from torch import nn -from ... import PreTrainedModel from ...activations import ACT2FN from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -587,3 +587,6 @@ def prepare_inputs_for_generation( model_inputs["pixel_values"] = pixel_values return model_inputs + + +__all__ = ["VipLlavaPreTrainedModel", "VipLlavaForConditionalGeneration"] diff --git a/src/transformers/models/vision_encoder_decoder/__init__.py b/src/transformers/models/vision_encoder_decoder/__init__.py index b0fe3bdc82a9..2cb4831aae8d 100644 --- a/src/transformers/models/vision_encoder_decoder/__init__.py +++ b/src/transformers/models/vision_encoder_decoder/__init__.py @@ -11,74 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: - from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel - + from .configuration_vision_encoder_decoder import * + from .modeling_flax_vision_encoder_decoder import * + from .modeling_tf_vision_encoder_decoder import * + from .modeling_vision_encoder_decoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py index a4aa663f9852..7c5a83b39947 100644 --- a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py @@ -25,7 +25,7 @@ if TYPE_CHECKING: - from ... 
import PreTrainedTokenizerBase, TensorType + from ...tokenization_utils_base import PreTrainedTokenizerBase, TensorType logger = logging.get_logger(__name__) @@ -207,3 +207,6 @@ def get_decoder_config( """ decoder_config.encoder_hidden_size = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) + + +__all__ = ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py index 1af006a3525a..5bb927f2e1b0 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_flax_vision_encoder_decoder.py @@ -859,3 +859,6 @@ def from_encoder_decoder_pretrained( model.params["decoder"] = decoder.params return model + + +__all__ = ["FlaxVisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index 383dd0e3e451..9a027f04784a 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -695,3 +695,6 @@ def build(self, input_shape=None): if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) + + +__all__ = ["TFVisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index 979bd69de9be..29414b906ef7 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -678,3 +678,6 @@ def resize_token_embeddings(self, *args, **kwargs): def _reorder_cache(self, past_key_values, beam_idx): # apply decoder cache reordering here return self.decoder._reorder_cache(past_key_values, beam_idx) + + +__all__ = ["VisionEncoderDecoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/__init__.py b/src/transformers/models/vision_text_dual_encoder/__init__.py index 27c117274b64..fa804651af26 100644 --- a/src/transformers/models/vision_text_dual_encoder/__init__.py +++ b/src/transformers/models/vision_text_dual_encoder/__init__.py @@ -13,77 +13,18 @@ # limitations under the License.
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], - "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig - from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel - - + from .configuration_vision_text_dual_encoder import * + from .modeling_flax_vision_text_dual_encoder import * + from .modeling_tf_vision_text_dual_encoder import * + from .modeling_vision_text_dual_encoder import * + from .processing_vision_text_dual_encoder import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py index 4cea34ca2313..363cebdf4136 100644 --- a/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py @@ -116,3 +116,6 @@ def from_vision_text_configs(cls, vision_config: PretrainedConfig, text_config: """ return cls(vision_config=vision_config.to_dict(), text_config=text_config.to_dict(), **kwargs) + + +__all__ = ["VisionTextDualEncoderConfig"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py index 23244af1e31c..648dc8200c4e 100644 --- a/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py +++ 
b/src/transformers/models/vision_text_dual_encoder/modeling_flax_vision_text_dual_encoder.py @@ -596,3 +596,5 @@ def from_vision_text_pretrained( append_replace_return_docstrings( FlaxVisionTextDualEncoderModel, output_type=FlaxCLIPOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = ["FlaxVisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py index 077b452f70f2..bb1808aece91 100644 --- a/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py @@ -619,3 +619,6 @@ def dummy_inputs(self): pixel_values = tf.constant(VISION_DUMMY_INPUTS) dummy = {"pixel_values": pixel_values, "input_ids": input_ids} return dummy + + +__all__ = ["TFVisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py index 5b90faa8862c..ca4ad9b47ccc 100755 --- a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py @@ -532,3 +532,6 @@ def from_vision_text_pretrained( ) return model + + +__all__ = ["VisionTextDualEncoderModel"] diff --git a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py index 0d723ed10bf0..7ba82a131d3a 100644 --- a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py @@ -148,3 +148,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["VisionTextDualEncoderProcessor"] diff --git a/src/transformers/models/visual_bert/__init__.py b/src/transformers/models/visual_bert/__init__.py index db74a924a85c..f6b5e84a3200 100644 --- a/src/transformers/models/visual_bert/__init__.py +++ b/src/transformers/models/visual_bert/__init__.py @@ -13,51 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = {"configuration_visual_bert": ["VisualBertConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_visual_bert"] = [ - "VisualBertForMultipleChoice", - "VisualBertForPreTraining", - "VisualBertForQuestionAnswering", - "VisualBertForRegionToPhraseAlignment", - "VisualBertForVisualReasoning", - "VisualBertLayer", - "VisualBertModel", - "VisualBertPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_visual_bert import VisualBertConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_visual_bert import ( - VisualBertForMultipleChoice, - VisualBertForPreTraining, - VisualBertForQuestionAnswering, - VisualBertForRegionToPhraseAlignment, - VisualBertForVisualReasoning, - VisualBertLayer, - VisualBertModel, - VisualBertPreTrainedModel, - ) - - + from .configuration_visual_bert import * + from .modeling_visual_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/visual_bert/configuration_visual_bert.py b/src/transformers/models/visual_bert/configuration_visual_bert.py index ae98229a7d98..a866227d3470 100644 --- a/src/transformers/models/visual_bert/configuration_visual_bert.py +++ b/src/transformers/models/visual_bert/configuration_visual_bert.py @@ -130,3 +130,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.bypass_transformer = bypass_transformer self.special_visual_initialize = special_visual_initialize + + +__all__ = ["VisualBertConfig"] diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index f7280c0c492c..a9ad99bf633b 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -1582,3 +1582,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "VisualBertPreTrainedModel", + "VisualBertModel", + "VisualBertForPreTraining", + "VisualBertForMultipleChoice", + "VisualBertForQuestionAnswering", + "VisualBertForVisualReasoning", + "VisualBertForRegionToPhraseAlignment", +] diff --git a/src/transformers/models/vit/__init__.py b/src/transformers/models/vit/__init__.py index 3066331278e4..eac28c3ca1e6 100644 --- a/src/transformers/models/vit/__init__.py +++ b/src/transformers/models/vit/__init__.py @@ -13,125 +13,20 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, - is_torchvision_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_vit": ["ViTConfig", "ViTOnnxConfig"]}, -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"] - _import_structure["image_processing_vit"] = ["ViTImageProcessor"] - - -try: - if not is_torchvision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_vit_fast"] = ["ViTImageProcessorFast"] -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vit"] = [ - "ViTForImageClassification", - "ViTForMaskedImageModeling", - "ViTModel", - "ViTPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vit"] = [ - "TFViTForImageClassification", - "TFViTModel", - "TFViTPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_vit"] = [ - "FlaxViTForImageClassification", - "FlaxViTModel", - "FlaxViTPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_vit import ViTConfig, ViTOnnxConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_vit import ViTFeatureExtractor - from .image_processing_vit import ViTImageProcessor - - try: - if not is_torchvision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_vit_fast import ViTImageProcessorFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vit import ( - ViTForImageClassification, - ViTForMaskedImageModeling, - ViTModel, - ViTPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel - - + from .configuration_vit import * + from .feature_extraction_vit import * + from .image_processing_vit import * + from .image_processing_vit_fast import * + from .modeling_flax_vit import * + from .modeling_tf_vit import * + from .modeling_vit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py index 
bacec851a931..bb8b908903fb 100644 --- a/src/transformers/models/vit/configuration_vit.py +++ b/src/transformers/models/vit/configuration_vit.py @@ -136,3 +136,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: @property def atol_for_validation(self) -> float: return 1e-4 + + +__all__ = ["ViTConfig", "ViTOnnxConfig"] diff --git a/src/transformers/models/vit/feature_extraction_vit.py b/src/transformers/models/vit/feature_extraction_vit.py index 54d47c0f3ad5..ac700578b7de 100644 --- a/src/transformers/models/vit/feature_extraction_vit.py +++ b/src/transformers/models/vit/feature_extraction_vit.py @@ -17,12 +17,14 @@ import warnings from ...utils import logging +from ...utils.import_utils import export from .image_processing_vit import ViTImageProcessor logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ViTFeatureExtractor(ViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( @@ -31,3 +33,6 @@ def __init__(self, *args, **kwargs) -> None: FutureWarning, ) super().__init__(*args, **kwargs) + + +__all__ = ["ViTFeatureExtractor"] diff --git a/src/transformers/models/vit/image_processing_vit.py b/src/transformers/models/vit/image_processing_vit.py index 7c0d8abefd8b..49df7d120c7c 100644 --- a/src/transformers/models/vit/image_processing_vit.py +++ b/src/transformers/models/vit/image_processing_vit.py @@ -34,11 +34,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class ViTImageProcessor(BaseImageProcessor): r""" Constructs a ViT image processor. @@ -270,3 +272,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["ViTImageProcessor"] diff --git a/src/transformers/models/vit/modeling_flax_vit.py b/src/transformers/models/vit/modeling_flax_vit.py index 586c8b62f6da..b2ceb3bad130 100644 --- a/src/transformers/models/vit/modeling_flax_vit.py +++ b/src/transformers/models/vit/modeling_flax_vit.py @@ -671,3 +671,5 @@ class FlaxViTForImageClassification(FlaxViTPreTrainedModel): append_replace_return_docstrings( FlaxViTForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=ViTConfig ) + +__all__ = ["FlaxViTPreTrainedModel", "FlaxViTModel", "FlaxViTForImageClassification"] diff --git a/src/transformers/models/vit/modeling_tf_vit.py b/src/transformers/models/vit/modeling_tf_vit.py index 2cf120df7408..44d28b0f3d9a 100644 --- a/src/transformers/models/vit/modeling_tf_vit.py +++ b/src/transformers/models/vit/modeling_tf_vit.py @@ -902,3 +902,6 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) + + +__all__ = ["TFViTPreTrainedModel", "TFViTModel", "TFViTForImageClassification", "TFViTMainLayer"] diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index 76ebd18ed32d..8e2babc5eb4e 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -885,3 +885,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ViTPreTrainedModel", "ViTModel", "ViTForMaskedImageModeling", "ViTForImageClassification"] diff --git a/src/transformers/models/vit_mae/__init__.py b/src/transformers/models/vit_mae/__init__.py index 
f5360061762e..66969d74b1ca 100644 --- a/src/transformers/models/vit_mae/__init__.py +++ b/src/transformers/models/vit_mae/__init__.py @@ -13,68 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_vit_mae": ["ViTMAEConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vit_mae"] = [ - "ViTMAEForPreTraining", - "ViTMAELayer", - "ViTMAEModel", - "ViTMAEPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_vit_mae"] = [ - "TFViTMAEForPreTraining", - "TFViTMAEModel", - "TFViTMAEPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_vit_mae import ViTMAEConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vit_mae import ( - ViTMAEForPreTraining, - ViTMAELayer, - ViTMAEModel, - ViTMAEPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel - - + from .configuration_vit_mae import * + from .modeling_tf_vit_mae import * + from .modeling_vit_mae import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vit_mae/configuration_vit_mae.py b/src/transformers/models/vit_mae/configuration_vit_mae.py index d20b5af13015..2c5ec3600599 100644 --- a/src/transformers/models/vit_mae/configuration_vit_mae.py +++ b/src/transformers/models/vit_mae/configuration_vit_mae.py @@ -135,3 +135,6 @@ def __init__( self.decoder_intermediate_size = decoder_intermediate_size self.mask_ratio = mask_ratio self.norm_pix_loss = norm_pix_loss + + +__all__ = ["ViTMAEConfig"] diff --git a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py index 5760dbf1efb6..28d5efe45c64 100644 --- a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py @@ -1370,3 +1370,6 @@ def build(self, input_shape=None): if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) + + +__all__ = ["TFViTMAEPreTrainedModel", "TFViTMAEModel", "TFViTMAEForPreTraining", "TFViTMAEMainLayer"] diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index f6444999ac12..64af154afcc7 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -1167,3 +1167,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ViTMAEPreTrainedModel", "ViTMAEModel", "ViTMAEForPreTraining"] diff --git a/src/transformers/models/vit_msn/__init__.py 
b/src/transformers/models/vit_msn/__init__.py index 88f7ff73d29b..9279a951153f 100644 --- a/src/transformers/models/vit_msn/__init__.py +++ b/src/transformers/models/vit_msn/__init__.py @@ -13,39 +13,15 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_vit_msn": ["ViTMSNConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vit_msn"] = [ - "ViTMSNModel", - "ViTMSNForImageClassification", - "ViTMSNPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_vit_msn import ViTMSNConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vit_msn import ( - ViTMSNForImageClassification, - ViTMSNModel, - ViTMSNPreTrainedModel, - ) - + from .configuration_vit_msn import * + from .modeling_vit_msn import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vit_msn/configuration_vit_msn.py b/src/transformers/models/vit_msn/configuration_vit_msn.py index 7cf4414f8d73..cd47df3e9932 100644 --- a/src/transformers/models/vit_msn/configuration_vit_msn.py +++ b/src/transformers/models/vit_msn/configuration_vit_msn.py @@ -110,3 +110,6 @@ def __init__( self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias + + +__all__ = ["ViTMSNConfig"] diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index b962ac597dab..bfae54422767 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -740,3 +740,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["ViTMSNPreTrainedModel", "ViTMSNModel", "ViTMSNForImageClassification"] diff --git a/src/transformers/models/vitdet/__init__.py b/src/transformers/models/vitdet/__init__.py index a7ee9c755ff1..12b3dbbaf5e1 100644 --- a/src/transformers/models/vitdet/__init__.py +++ b/src/transformers/models/vitdet/__init__.py @@ -13,43 +13,15 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_vitdet": ["VitDetConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vitdet"] = [ - "VitDetModel", - "VitDetPreTrainedModel", - "VitDetBackbone", - ] - if TYPE_CHECKING: - from .configuration_vitdet import VitDetConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vitdet import ( - VitDetBackbone, - VitDetModel, - VitDetPreTrainedModel, - ) - + from .configuration_vitdet import * + from .modeling_vitdet import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vitdet/configuration_vitdet.py b/src/transformers/models/vitdet/configuration_vitdet.py index 856f228a5b4b..cd91dce9b296 100644 --- a/src/transformers/models/vitdet/configuration_vitdet.py +++ b/src/transformers/models/vitdet/configuration_vitdet.py @@ -151,3 +151,6 @@ def __init__( self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) + + +__all__ = ["VitDetConfig"] diff --git a/src/transformers/models/vitdet/modeling_vitdet.py b/src/transformers/models/vitdet/modeling_vitdet.py index 40edb6a05c68..7e4f48a7ea25 100644 --- a/src/transformers/models/vitdet/modeling_vitdet.py +++ b/src/transformers/models/vitdet/modeling_vitdet.py @@ -872,3 +872,6 @@ def forward( hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) + + +__all__ = ["VitDetPreTrainedModel", "VitDetModel", "VitDetBackbone"] diff --git a/src/transformers/models/vitmatte/__init__.py b/src/transformers/models/vitmatte/__init__.py index 7745a96cc6d5..d77bc0b6dac8 100644 --- a/src/transformers/models/vitmatte/__init__.py +++ b/src/transformers/models/vitmatte/__init__.py @@ -13,58 +13,16 @@ # limitations under the License. 
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_torch_available, - is_vision_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_vitmatte": ["VitMatteConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_vitmatte"] = ["VitMatteImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vitmatte"] = [ - "VitMattePreTrainedModel", - "VitMatteForImageMatting", - ] - if TYPE_CHECKING: - from .configuration_vitmatte import VitMatteConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_vitmatte import VitMatteImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vitmatte import ( - VitMatteForImageMatting, - VitMattePreTrainedModel, - ) - + from .configuration_vitmatte import * + from .image_processing_vitmatte import * + from .modeling_vitmatte import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vitmatte/configuration_vitmatte.py b/src/transformers/models/vitmatte/configuration_vitmatte.py index 36e46764a006..b9f78043306b 100644 --- a/src/transformers/models/vitmatte/configuration_vitmatte.py +++ b/src/transformers/models/vitmatte/configuration_vitmatte.py @@ -131,3 +131,6 @@ def to_dict(self): output["backbone_config"] = self.backbone_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +__all__ = ["VitMatteConfig"] diff --git a/src/transformers/models/vitmatte/image_processing_vitmatte.py b/src/transformers/models/vitmatte/image_processing_vitmatte.py index 599442267822..1941c1a379c8 100644 --- a/src/transformers/models/vitmatte/image_processing_vitmatte.py +++ b/src/transformers/models/vitmatte/image_processing_vitmatte.py @@ -34,11 +34,13 @@ validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) +@export(backends=("vision",)) class VitMatteImageProcessor(BaseImageProcessor): r""" Constructs a ViTMatte image processor. 
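# Assumed shape of define_import_structure's return value, inferred from the
# hand-written _import_structure dicts it replaces and from the backend tuples
# passed to @export; treat the exact types as illustrative, not as the
# utility's documented contract.
from ...utils.import_utils import define_import_structure

structure = define_import_structure(globals()["__file__"])
# For vitmatte, plausibly something like:
# {
#     frozenset(): {"configuration_vitmatte": {"VitMatteConfig"}},
#     frozenset({"vision"}): {"image_processing_vitmatte": {"VitMatteImageProcessor"}},
#     frozenset({"torch"}): {"modeling_vitmatte": {"VitMattePreTrainedModel", "VitMatteForImageMatting"}},
# }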
@@ -267,3 +269,6 @@ def preprocess( data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["VitMatteImageProcessor"] diff --git a/src/transformers/models/vitmatte/modeling_vitmatte.py b/src/transformers/models/vitmatte/modeling_vitmatte.py index fb18ed6e789c..b27bc2887080 100644 --- a/src/transformers/models/vitmatte/modeling_vitmatte.py +++ b/src/transformers/models/vitmatte/modeling_vitmatte.py @@ -336,3 +336,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["VitMattePreTrainedModel", "VitMatteForImageMatting"] diff --git a/src/transformers/models/vits/__init__.py b/src/transformers/models/vits/__init__.py index 14428463d28a..ac9f316c11e6 100644 --- a/src/transformers/models/vits/__init__.py +++ b/src/transformers/models/vits/__init__.py @@ -13,49 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_speech_available, - is_torch_available, -) +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_vits": ["VitsConfig"], - "tokenization_vits": ["VitsTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vits"] = [ - "VitsModel", - "VitsPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_vits import ( - VitsConfig, - ) - from .tokenization_vits import VitsTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vits import ( - VitsModel, - VitsPreTrainedModel, - ) - + from .configuration_vits import * + from .modeling_vits import * + from .tokenization_vits import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vits/configuration_vits.py b/src/transformers/models/vits/configuration_vits.py index 0f2aeb8ac445..6de2591b0f3a 100644 --- a/src/transformers/models/vits/configuration_vits.py +++ b/src/transformers/models/vits/configuration_vits.py @@ -248,3 +248,6 @@ def __init__( ) super().__init__(**kwargs) + + +__all__ = ["VitsConfig"] diff --git a/src/transformers/models/vits/modeling_vits.py b/src/transformers/models/vits/modeling_vits.py index d8dffd4376e0..9b54bb9e1eea 100644 --- a/src/transformers/models/vits/modeling_vits.py +++ b/src/transformers/models/vits/modeling_vits.py @@ -1478,3 +1478,6 @@ def forward( hidden_states=text_encoder_output.hidden_states, attentions=text_encoder_output.attentions, ) + + +__all__ = ["VitsPreTrainedModel", "VitsModel"] diff --git a/src/transformers/models/vits/tokenization_vits.py b/src/transformers/models/vits/tokenization_vits.py index b4d8af740375..ca40c80c124c 100644 --- a/src/transformers/models/vits/tokenization_vits.py +++ b/src/transformers/models/vits/tokenization_vits.py @@ -241,3 +241,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) + + +__all__ = ["VitsTokenizer"] diff --git 
a/src/transformers/models/vivit/__init__.py b/src/transformers/models/vivit/__init__.py index 261238edccbe..b565ff2a78f8 100644 --- a/src/transformers/models/vivit/__init__.py +++ b/src/transformers/models/vivit/__init__.py @@ -1,76 +1,18 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. from typing import TYPE_CHECKING -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_vivit": ["VivitConfig"], -} -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_vivit"] = ["VivitImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_vivit"] = [ - "VivitModel", - "VivitPreTrainedModel", - "VivitForVideoClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_vivit import VivitConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_vivit import VivitImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_vivit import ( - VivitForVideoClassification, - VivitModel, - VivitPreTrainedModel, - ) - - + from .configuration_vivit import * + from .image_processing_vivit import * + from .modeling_vivit import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/vivit/configuration_vivit.py b/src/transformers/models/vivit/configuration_vivit.py index 63895e4fb79f..42863454e81c 100644 --- a/src/transformers/models/vivit/configuration_vivit.py +++ b/src/transformers/models/vivit/configuration_vivit.py @@ -114,3 +114,6 @@ def __init__( self.qkv_bias = qkv_bias super().__init__(**kwargs) + + +__all__ = ["VivitConfig"] diff --git a/src/transformers/models/vivit/image_processing_vivit.py b/src/transformers/models/vivit/image_processing_vivit.py index 5f251bbd1b95..00044bd92522 100644 --- a/src/transformers/models/vivit/image_processing_vivit.py +++ b/src/transformers/models/vivit/image_processing_vivit.py @@ -42,6 +42,7 @@ validate_preprocess_arguments, ) from ...utils import filter_out_non_signature_kwargs, logging +from ...utils.import_utils import 
export if is_vision_available(): @@ -63,6 +64,7 @@ def make_batched(videos) -> List[List[ImageInput]]: raise ValueError(f"Could not make batched video from {videos}") +@export(backends=("vision",)) class VivitImageProcessor(BaseImageProcessor): r""" Constructs a Vivit image processor. @@ -402,3 +404,6 @@ def preprocess( data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) + + +__all__ = ["VivitImageProcessor"] diff --git a/src/transformers/models/vivit/modeling_vivit.py b/src/transformers/models/vivit/modeling_vivit.py index 972040264fec..4b3f764f04fc 100755 --- a/src/transformers/models/vivit/modeling_vivit.py +++ b/src/transformers/models/vivit/modeling_vivit.py @@ -804,3 +804,6 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = ["VivitPreTrainedModel", "VivitModel", "VivitForVideoClassification"] diff --git a/src/transformers/models/wav2vec2/__init__.py b/src/transformers/models/wav2vec2/__init__.py index 06e1c6628db9..81ea00c40d04 100644 --- a/src/transformers/models/wav2vec2/__init__.py +++ b/src/transformers/models/wav2vec2/__init__.py @@ -13,118 +13,20 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_torch_available, -) - - -_import_structure = { - "configuration_wav2vec2": ["Wav2Vec2Config"], - "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], - "processing_wav2vec2": ["Wav2Vec2Processor"], - "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_wav2vec2"] = [ - "Wav2Vec2ForAudioFrameClassification", - "Wav2Vec2ForCTC", - "Wav2Vec2ForMaskedLM", - "Wav2Vec2ForPreTraining", - "Wav2Vec2ForSequenceClassification", - "Wav2Vec2ForXVector", - "Wav2Vec2Model", - "Wav2Vec2PreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_wav2vec2"] = [ - "TFWav2Vec2ForCTC", - "TFWav2Vec2Model", - "TFWav2Vec2PreTrainedModel", - "TFWav2Vec2ForSequenceClassification", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_wav2vec2"] = [ - "FlaxWav2Vec2ForCTC", - "FlaxWav2Vec2ForPreTraining", - "FlaxWav2Vec2Model", - "FlaxWav2Vec2PreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_wav2vec2 import Wav2Vec2Config - from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor - from .processing_wav2vec2 import Wav2Vec2Processor - from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_wav2vec2 import ( - Wav2Vec2ForAudioFrameClassification, - Wav2Vec2ForCTC, - Wav2Vec2ForMaskedLM, - Wav2Vec2ForPreTraining, - Wav2Vec2ForSequenceClassification, - Wav2Vec2ForXVector, - Wav2Vec2Model, - Wav2Vec2PreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_wav2vec2 
import ( - TFWav2Vec2ForCTC, - TFWav2Vec2ForSequenceClassification, - TFWav2Vec2Model, - TFWav2Vec2PreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_wav2vec2 import ( - FlaxWav2Vec2ForCTC, - FlaxWav2Vec2ForPreTraining, - FlaxWav2Vec2Model, - FlaxWav2Vec2PreTrainedModel, - ) - - + from .configuration_wav2vec2 import * + from .feature_extraction_wav2vec2 import * + from .modeling_flax_wav2vec2 import * + from .modeling_tf_wav2vec2 import * + from .modeling_wav2vec2 import * + from .processing_wav2vec2 import * + from .tokenization_wav2vec2 import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wav2vec2/configuration_wav2vec2.py b/src/transformers/models/wav2vec2/configuration_wav2vec2.py index b4a676ddba8f..c28aa6305f85 100644 --- a/src/transformers/models/wav2vec2/configuration_wav2vec2.py +++ b/src/transformers/models/wav2vec2/configuration_wav2vec2.py @@ -342,3 +342,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["Wav2Vec2Config"] diff --git a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py index e5266c67ded6..f76d98309406 100644 --- a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py +++ b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py @@ -238,3 +238,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["Wav2Vec2FeatureExtractor"] diff --git a/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py index 9a24b9d39fda..6cbd8b01aa9b 100644 --- a/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py @@ -1423,3 +1423,5 @@ def __call__( append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config ) + +__all__ = ["FlaxWav2Vec2PreTrainedModel", "FlaxWav2Vec2Model", "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining"] diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py index a8338e363d94..4b9a738e0319 100644 --- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py @@ -1853,3 +1853,12 @@ def build(self, input_shape=None): if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.classifier_proj_size]) + + +__all__ = [ + "TFWav2Vec2PreTrainedModel", + "TFWav2Vec2Model", + "TFWav2Vec2ForCTC", + "TFWav2Vec2ForSequenceClassification", + "TFWav2Vec2MainLayer", +] diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index f1d021b58ee5..783da992c3c0 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -2714,3 +2714,15 @@ def forward( hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, ) + + +__all__ = [ + "Wav2Vec2PreTrainedModel", + "Wav2Vec2Model", + "Wav2Vec2ForPreTraining", + "Wav2Vec2ForMaskedLM", + "Wav2Vec2ForCTC", + "Wav2Vec2ForSequenceClassification", + "Wav2Vec2ForAudioFrameClassification", + "Wav2Vec2ForXVector", +] diff --git a/src/transformers/models/wav2vec2/processing_wav2vec2.py b/src/transformers/models/wav2vec2/processing_wav2vec2.py index 6fe960c78eb1..bb751b013cd1 100644 --- a/src/transformers/models/wav2vec2/processing_wav2vec2.py +++ b/src/transformers/models/wav2vec2/processing_wav2vec2.py @@ -164,3 +164,6 @@ def as_target_processor(self): yield self.current_processor = self.feature_extractor self._in_target_context_manager = False + + +__all__ = ["Wav2Vec2Processor"] diff --git a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py index 647b18521d05..df82eca32ee8 100644 --- a/src/transformers/models/wav2vec2/tokenization_wav2vec2.py +++ b/src/transformers/models/wav2vec2/tokenization_wav2vec2.py @@ -913,3 +913,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) + + +__all__ = ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"] diff --git a/src/transformers/models/wav2vec2_bert/__init__.py b/src/transformers/models/wav2vec2_bert/__init__.py index be37038211a8..7520263c51bc 100644 --- a/src/transformers/models/wav2vec2_bert/__init__.py +++ b/src/transformers/models/wav2vec2_bert/__init__.py @@ -13,52 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_wav2vec2_bert": ["Wav2Vec2BertConfig"], - "processing_wav2vec2_bert": ["Wav2Vec2BertProcessor"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_wav2vec2_bert"] = [ - "Wav2Vec2BertForAudioFrameClassification", - "Wav2Vec2BertForCTC", - "Wav2Vec2BertForSequenceClassification", - "Wav2Vec2BertForXVector", - "Wav2Vec2BertModel", - "Wav2Vec2BertPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_wav2vec2_bert import ( - Wav2Vec2BertConfig, - ) - from .processing_wav2vec2_bert import Wav2Vec2BertProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_wav2vec2_bert import ( - Wav2Vec2BertForAudioFrameClassification, - Wav2Vec2BertForCTC, - Wav2Vec2BertForSequenceClassification, - Wav2Vec2BertForXVector, - Wav2Vec2BertModel, - Wav2Vec2BertPreTrainedModel, - ) - + from .configuration_wav2vec2_bert import * + from .modeling_wav2vec2_bert import * + from .processing_wav2vec2_bert import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py index 20b4e4fa1306..db52cc5baed3 100644 --- 
a/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +++ b/src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py @@ -308,3 +308,6 @@ def inputs_to_logits_ratio(self): if self.add_adapter: ratio = ratio * (self.adapter_stride**self.num_adapter_layers) return ratio + + +__all__ = ["Wav2Vec2BertConfig"] diff --git a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py index ebbf700bf9ef..917f77941c91 100644 --- a/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +++ b/src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py @@ -1665,3 +1665,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Wav2Vec2BertPreTrainedModel", + "Wav2Vec2BertModel", + "Wav2Vec2BertForCTC", + "Wav2Vec2BertForSequenceClassification", + "Wav2Vec2BertForAudioFrameClassification", + "Wav2Vec2BertForXVector", +] diff --git a/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py b/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py index d24c672007d7..9a76535a0b98 100644 --- a/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +++ b/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py @@ -144,3 +144,6 @@ def decode(self, *args, **kwargs): to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) + + +__all__ = ["Wav2Vec2BertProcessor"] diff --git a/src/transformers/models/wav2vec2_conformer/__init__.py b/src/transformers/models/wav2vec2_conformer/__init__.py index a780a50b6cce..f3660f66fecc 100644 --- a/src/transformers/models/wav2vec2_conformer/__init__.py +++ b/src/transformers/models/wav2vec2_conformer/__init__.py @@ -13,52 +13,15 @@ # limitations under the License. 
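The rewrite above repeats mechanically for every package in this patch: the hand-maintained _import_structure dict is deleted, and _LazyModule is handed define_import_structure(_file) so that the structure is derived from the submodules themselves. As a rough mental model only (the real helper lives in src/transformers/utils/import_utils.py and also tracks backend requirements; toy_define_import_structure and everything inside it are invented for illustration), a minimal sketch that derives the same kind of mapping from each file's __all__:

import ast
from pathlib import Path


def toy_define_import_structure(init_file):
    """Toy sketch: map each sibling module to the names in its __all__."""
    structure = {}
    for path in Path(init_file).parent.glob("*.py"):
        if path.name == "__init__.py":
            continue
        tree = ast.parse(path.read_text(encoding="utf-8"))
        for node in tree.body:
            if not isinstance(node, ast.Assign):
                continue
            targets = [t.id for t in node.targets if isinstance(t, ast.Name)]
            if "__all__" in targets and isinstance(node.value, (ast.List, ast.Tuple)):
                # Keep the string literals of `__all__ = [...]`.
                structure[path.stem] = [
                    el.value for el in node.value.elts if isinstance(el, ast.Constant)
                ]
    return structure

Run against the wav2vec2_bert package just refactored, something of roughly this shape would come back, which is exactly the mapping _LazyModule consumes: {"configuration_wav2vec2_bert": ["Wav2Vec2BertConfig"], "processing_wav2vec2_bert": ["Wav2Vec2BertProcessor"], ...}.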
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_wav2vec2_conformer": ["Wav2Vec2ConformerConfig"], -} - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_wav2vec2_conformer"] = [ - "Wav2Vec2ConformerForAudioFrameClassification", - "Wav2Vec2ConformerForCTC", - "Wav2Vec2ConformerForPreTraining", - "Wav2Vec2ConformerForSequenceClassification", - "Wav2Vec2ConformerForXVector", - "Wav2Vec2ConformerModel", - "Wav2Vec2ConformerPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_wav2vec2_conformer import ( - Wav2Vec2ConformerConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_wav2vec2_conformer import ( - Wav2Vec2ConformerForAudioFrameClassification, - Wav2Vec2ConformerForCTC, - Wav2Vec2ConformerForPreTraining, - Wav2Vec2ConformerForSequenceClassification, - Wav2Vec2ConformerForXVector, - Wav2Vec2ConformerModel, - Wav2Vec2ConformerPreTrainedModel, - ) - + from .configuration_wav2vec2_conformer import * + from .modeling_wav2vec2_conformer import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py index 8f78aa937535..73a840b9f824 100644 --- a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py @@ -355,3 +355,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["Wav2Vec2ConformerConfig"] diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index c37dd980d4ed..71086318d878 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -2112,3 +2112,14 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "Wav2Vec2ConformerPreTrainedModel", + "Wav2Vec2ConformerModel", + "Wav2Vec2ConformerForPreTraining", + "Wav2Vec2ConformerForCTC", + "Wav2Vec2ConformerForSequenceClassification", + "Wav2Vec2ConformerForAudioFrameClassification", + "Wav2Vec2ConformerForXVector", +] diff --git a/src/transformers/models/wav2vec2_phoneme/__init__.py b/src/transformers/models/wav2vec2_phoneme/__init__.py index 7859f381dd51..735d2092a7ce 100644 --- a/src/transformers/models/wav2vec2_phoneme/__init__.py +++ b/src/transformers/models/wav2vec2_phoneme/__init__.py @@ -14,14 +14,13 @@ from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .tokenization_wav2vec2_phoneme 
import Wav2Vec2PhonemeCTCTokenizer + from .tokenization_wav2vec2_phoneme import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py b/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py index ff4704c778c0..b617b17d02b9 100644 --- a/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +++ b/src/transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py @@ -575,3 +575,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) + + +__all__ = ["Wav2Vec2PhonemeCTCTokenizer"] diff --git a/src/transformers/models/wav2vec2_with_lm/__init__.py b/src/transformers/models/wav2vec2_with_lm/__init__.py index 611688f6a683..30c00624d4d5 100644 --- a/src/transformers/models/wav2vec2_with_lm/__init__.py +++ b/src/transformers/models/wav2vec2_with_lm/__init__.py @@ -14,14 +14,13 @@ from typing import TYPE_CHECKING from ...utils import _LazyModule - - -_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]} +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM + from .processing_wav2vec2_with_lm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py index 0081008009e3..f569b4f625e7 100644 --- a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +++ b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py @@ -653,3 +653,6 @@ def as_target_processor(self): yield self.current_processor = self.feature_extractor self._in_target_context_manager = False + + +__all__ = ["Wav2Vec2ProcessorWithLM"] diff --git a/src/transformers/models/wavlm/__init__.py b/src/transformers/models/wavlm/__init__.py index d615a3a5ae40..c139914a9c2e 100644 --- a/src/transformers/models/wavlm/__init__.py +++ b/src/transformers/models/wavlm/__init__.py @@ -13,45 +13,15 @@ # limitations under the License. 
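Every runtime branch now ends the same way: the entry in sys.modules is swapped for a _LazyModule built from that derived structure. Loosely, and this is a simplified sketch rather than the actual class from transformers.utils, the proxy resolves a name to its home submodule on first attribute access:

import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Simplified sketch of the lazy-module pattern used above."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"configuration_wavlm": ["WavLMConfig"], "modeling_wavlm": [...]}
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(
            f".{self._name_to_module[attr]}", self.__name__
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

The effect for wavlm below: importing the package stays cheap, and modeling_wavlm (and with it torch) is only pulled in the first time a name such as WavLMModel is actually requested.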
from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = {"configuration_wavlm": ["WavLMConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_wavlm"] = [ - "WavLMForAudioFrameClassification", - "WavLMForCTC", - "WavLMForSequenceClassification", - "WavLMForXVector", - "WavLMModel", - "WavLMPreTrainedModel", - ] - if TYPE_CHECKING: - from .configuration_wavlm import WavLMConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_wavlm import ( - WavLMForAudioFrameClassification, - WavLMForCTC, - WavLMForSequenceClassification, - WavLMForXVector, - WavLMModel, - WavLMPreTrainedModel, - ) - + from .configuration_wavlm import * + from .modeling_wavlm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/wavlm/configuration_wavlm.py b/src/transformers/models/wavlm/configuration_wavlm.py index 3faeb7ab53b2..63cd44e12b0f 100644 --- a/src/transformers/models/wavlm/configuration_wavlm.py +++ b/src/transformers/models/wavlm/configuration_wavlm.py @@ -332,3 +332,6 @@ def __init__( @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) + + +__all__ = ["WavLMConfig"] diff --git a/src/transformers/models/wavlm/modeling_wavlm.py b/src/transformers/models/wavlm/modeling_wavlm.py index fa5fd390f564..0fbb444946bb 100755 --- a/src/transformers/models/wavlm/modeling_wavlm.py +++ b/src/transformers/models/wavlm/modeling_wavlm.py @@ -1849,3 +1849,13 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +__all__ = [ + "WavLMPreTrainedModel", + "WavLMModel", + "WavLMForCTC", + "WavLMForSequenceClassification", + "WavLMForAudioFrameClassification", + "WavLMForXVector", +] diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py index 5d37e72c02b5..58e0f17a1ce2 100644 --- a/src/transformers/models/whisper/__init__.py +++ b/src/transformers/models/whisper/__init__.py @@ -13,125 +13,21 @@ # limitations under the License. 
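This is also why every configuration, modeling, tokenization, feature-extraction and processing file in the patch gains an __all__: that list is now the single source of truth, both for the from .module import * lines that type checkers see and for deriving the import structure. The underlying semantics are plain Python, shown here on a throwaway module with invented names:

# whisper_toy.py (hypothetical file, illustrative names only)
ToyWhisperConfig = type("ToyWhisperConfig", (), {})
ToyWhisperHelper = type("ToyWhisperHelper", (), {})

__all__ = ["ToyWhisperConfig"]

# consumer:
#   from whisper_toy import *
# binds ToyWhisperConfig only. ToyWhisperHelper is public but unlisted, so the
# star import, and anything deriving structure from __all__, ignores it.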
from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_whisper": ["WhisperConfig", "WhisperOnnxConfig"], - "feature_extraction_whisper": ["WhisperFeatureExtractor"], - "processing_whisper": ["WhisperProcessor"], - "tokenization_whisper": ["WhisperTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_whisper"] = [ - "WhisperForCausalLM", - "WhisperForConditionalGeneration", - "WhisperModel", - "WhisperPreTrainedModel", - "WhisperForAudioClassification", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_whisper"] = [ - "TFWhisperForConditionalGeneration", - "TFWhisperModel", - "TFWhisperPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_whisper"] = [ - "FlaxWhisperForConditionalGeneration", - "FlaxWhisperModel", - "FlaxWhisperPreTrainedModel", - "FlaxWhisperForAudioClassification", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_whisper import WhisperConfig, WhisperOnnxConfig - from .feature_extraction_whisper import WhisperFeatureExtractor - from .processing_whisper import WhisperProcessor - from .tokenization_whisper import WhisperTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_whisper_fast import WhisperTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_whisper import ( - WhisperForAudioClassification, - WhisperForCausalLM, - WhisperForConditionalGeneration, - WhisperModel, - WhisperPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_whisper import ( - TFWhisperForConditionalGeneration, - TFWhisperModel, - TFWhisperPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_whisper import ( - FlaxWhisperForAudioClassification, - FlaxWhisperForConditionalGeneration, - FlaxWhisperModel, - FlaxWhisperPreTrainedModel, - ) - + from .configuration_whisper import * + from .feature_extraction_whisper import * + from .modeling_flax_whisper import * + from .modeling_tf_whisper import * + from .modeling_whisper import * + from .processing_whisper import * + from .tokenization_whisper import * + from .tokenization_whisper_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, 
define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py index d65811cbc8ef..3f7991215a85 100644 --- a/src/transformers/models/whisper/configuration_whisper.py +++ b/src/transformers/models/whisper/configuration_whisper.py @@ -342,3 +342,6 @@ def generate_dummy_inputs( @property def atol_for_validation(self) -> float: return 1e-3 + + +__all__ = ["WhisperConfig", "WhisperOnnxConfig"] diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py index a79eeedd0a29..9ebb087396ed 100644 --- a/src/transformers/models/whisper/feature_extraction_whisper.py +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -20,11 +20,11 @@ import numpy as np -from ... import is_torch_available from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging +from ...utils.import_utils import is_torch_available if is_torch_available(): @@ -322,3 +322,6 @@ def __call__( padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs + + +__all__ = ["WhisperFeatureExtractor"] diff --git a/src/transformers/models/whisper/modeling_flax_whisper.py b/src/transformers/models/whisper/modeling_flax_whisper.py index cc4483963c63..1aae2cf50d34 100644 --- a/src/transformers/models/whisper/modeling_flax_whisper.py +++ b/src/transformers/models/whisper/modeling_flax_whisper.py @@ -1694,3 +1694,10 @@ def __call__( append_replace_return_docstrings( FlaxWhisperForAudioClassification, output_type=FlaxSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC ) + +__all__ = [ + "FlaxWhisperPreTrainedModel", + "FlaxWhisperModel", + "FlaxWhisperForConditionalGeneration", + "FlaxWhisperForAudioClassification", +] diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py index 18f55dce8a22..4288fefd052e 100644 --- a/src/transformers/models/whisper/modeling_tf_whisper.py +++ b/src/transformers/models/whisper/modeling_tf_whisper.py @@ -1756,3 +1756,6 @@ def build(self, input_shape=None): if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) + + +__all__ = ["TFWhisperPreTrainedModel", "TFWhisperModel", "TFWhisperForConditionalGeneration", "TFWhisperMainLayer"] diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index 81f60edbfa98..fb9b5098ca27 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -2272,3 +2272,12 @@ def forward( hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) + + +__all__ = [ + "WhisperPreTrainedModel", + "WhisperModel", + "WhisperForConditionalGeneration", + "WhisperForCausalLM", + "WhisperForAudioClassification", +] diff --git a/src/transformers/models/whisper/processing_whisper.py b/src/transformers/models/whisper/processing_whisper.py index f22aae143e6b..ad5fa22e370f 100644 --- a/src/transformers/models/whisper/processing_whisper.py +++ b/src/transformers/models/whisper/processing_whisper.py @@ -95,3 +95,6 @@ def decode(self, *args, **kwargs): def get_prompt_ids(self, text: str, return_tensors="np"): return 
self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors) + + +__all__ = ["WhisperProcessor"] diff --git a/src/transformers/models/whisper/tokenization_whisper.py b/src/transformers/models/whisper/tokenization_whisper.py index 5cfd2300346e..53b4599f6f1d 100644 --- a/src/transformers/models/whisper/tokenization_whisper.py +++ b/src/transformers/models/whisper/tokenization_whisper.py @@ -1366,3 +1366,6 @@ def _merge_punctuations(words, tokens, indices, prepended, appended): words[:] = [word for word in words if word] tokens[:] = [token for token in tokens if token] indices[:] = [idx for idx in indices if idx] + + +__all__ = ["WhisperTokenizer"] diff --git a/src/transformers/models/whisper/tokenization_whisper_fast.py b/src/transformers/models/whisper/tokenization_whisper_fast.py index 6b6fb3a19900..809c2d44db58 100644 --- a/src/transformers/models/whisper/tokenization_whisper_fast.py +++ b/src/transformers/models/whisper/tokenization_whisper_fast.py @@ -615,3 +615,6 @@ def _convert_to_list(token_ids): if isinstance(token_ids, np.ndarray): token_ids = token_ids.tolist() return token_ids + + +__all__ = ["WhisperTokenizerFast"] diff --git a/src/transformers/models/x_clip/__init__.py b/src/transformers/models/x_clip/__init__.py index 2f60ad0ddee2..b12171f84d61 100644 --- a/src/transformers/models/x_clip/__init__.py +++ b/src/transformers/models/x_clip/__init__.py @@ -13,53 +13,16 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -_import_structure = { - "configuration_x_clip": [ - "XCLIPConfig", - "XCLIPTextConfig", - "XCLIPVisionConfig", - ], - "processing_x_clip": ["XCLIPProcessor"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_x_clip"] = [ - "XCLIPModel", - "XCLIPPreTrainedModel", - "XCLIPTextModel", - "XCLIPVisionModel", - ] - if TYPE_CHECKING: - from .configuration_x_clip import ( - XCLIPConfig, - XCLIPTextConfig, - XCLIPVisionConfig, - ) - from .processing_x_clip import XCLIPProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_x_clip import ( - XCLIPModel, - XCLIPPreTrainedModel, - XCLIPTextModel, - XCLIPVisionModel, - ) - + from .configuration_x_clip import * + from .modeling_x_clip import * + from .processing_x_clip import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py index 827046b6c353..afc86c5d9826 100644 --- a/src/transformers/models/x_clip/configuration_x_clip.py +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -412,3 +412,6 @@ def from_text_vision_configs(cls, text_config: XCLIPTextConfig, vision_config: X """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +__all__ = ["XCLIPTextConfig", "XCLIPVisionConfig", "XCLIPConfig"] diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index 
791e501d1737..f045752bdefa 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -1619,3 +1619,6 @@ def forward( vision_model_output=vision_outputs, mit_output=mit_outputs, ) + + +__all__ = ["XCLIPPreTrainedModel", "XCLIPTextModel", "XCLIPVisionModel", "XCLIPModel"] diff --git a/src/transformers/models/x_clip/processing_x_clip.py b/src/transformers/models/x_clip/processing_x_clip.py index a11aeb18dc4f..4a17d3a15a20 100644 --- a/src/transformers/models/x_clip/processing_x_clip.py +++ b/src/transformers/models/x_clip/processing_x_clip.py @@ -146,3 +146,6 @@ def feature_extractor(self): FutureWarning, ) return self.image_processor + + +__all__ = ["XCLIPProcessor"] diff --git a/src/transformers/models/xglm/__init__.py b/src/transformers/models/xglm/__init__.py index 59bba032f4ea..07c7c10d8ab4 100644 --- a/src/transformers/models/xglm/__init__.py +++ b/src/transformers/models/xglm/__init__.py @@ -13,123 +13,19 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_xglm": ["XGLMConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xglm"] = ["XGLMTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xglm"] = [ - "XGLMForCausalLM", - "XGLMModel", - "XGLMPreTrainedModel", - ] - - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_xglm"] = [ - "FlaxXGLMForCausalLM", - "FlaxXGLMModel", - "FlaxXGLMPreTrainedModel", - ] - - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xglm"] = [ - "TFXGLMForCausalLM", - "TFXGLMModel", - "TFXGLMPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xglm import XGLMConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xglm import XGLMTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xglm_fast import XGLMTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xglm import XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except 
OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xglm import ( - TFXGLMForCausalLM, - TFXGLMModel, - TFXGLMPreTrainedModel, - ) - - + from .configuration_xglm import * + from .modeling_flax_xglm import * + from .modeling_tf_xglm import * + from .modeling_xglm import * + from .tokenization_xglm import * + from .tokenization_xglm_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xglm/configuration_xglm.py b/src/transformers/models/xglm/configuration_xglm.py index c5a275405d77..da5ded491623 100644 --- a/src/transformers/models/xglm/configuration_xglm.py +++ b/src/transformers/models/xglm/configuration_xglm.py @@ -134,3 +134,6 @@ def __init__( decoder_start_token_id=decoder_start_token_id, **kwargs, ) + + +__all__ = ["XGLMConfig"] diff --git a/src/transformers/models/xglm/modeling_flax_xglm.py b/src/transformers/models/xglm/modeling_flax_xglm.py index 473448c66ccc..df9d52e59eaf 100644 --- a/src/transformers/models/xglm/modeling_flax_xglm.py +++ b/src/transformers/models/xglm/modeling_flax_xglm.py @@ -798,3 +798,5 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = ["FlaxXGLMPreTrainedModel", "FlaxXGLMModel", "FlaxXGLMForCausalLM"] diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index a62396b79c0e..d5c4f4f0cd60 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -1003,3 +1003,6 @@ def tf_to_pt_weight_rename(self, tf_weight): return tf_weight, "model.embed_tokens.weight" else: return (tf_weight,) + + +__all__ = ["TFXGLMPreTrainedModel", "TFXGLMModel", "TFXGLMForCausalLM", "TFXGLMMainLayer"] diff --git a/src/transformers/models/xglm/modeling_xglm.py b/src/transformers/models/xglm/modeling_xglm.py index 4f1693583494..1ec7b7799732 100755 --- a/src/transformers/models/xglm/modeling_xglm.py +++ b/src/transformers/models/xglm/modeling_xglm.py @@ -842,3 +842,6 @@ def _reorder_cache(past_key_values, beam_idx): tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + +__all__ = ["XGLMPreTrainedModel", "XGLMModel", "XGLMForCausalLM"] diff --git a/src/transformers/models/xglm/tokenization_xglm.py b/src/transformers/models/xglm/tokenization_xglm.py index 8713d5f129d1..031ad545734e 100644 --- a/src/transformers/models/xglm/tokenization_xglm.py +++ b/src/transformers/models/xglm/tokenization_xglm.py @@ -22,6 +22,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -31,6 +32,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} +@export(backends=("sentencepiece",)) class XGLMTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. 
Based on @@ -295,3 +297,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XGLMTokenizer"] diff --git a/src/transformers/models/xglm/tokenization_xglm_fast.py b/src/transformers/models/xglm/tokenization_xglm_fast.py index 2f8b0480c82d..92d99d2f863d 100644 --- a/src/transformers/models/xglm/tokenization_xglm_fast.py +++ b/src/transformers/models/xglm/tokenization_xglm_fast.py @@ -191,3 +191,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XGLMTokenizerFast"] diff --git a/src/transformers/models/xlm/__init__.py b/src/transformers/models/xlm/__init__.py index 97d0933b8b9a..f4adecf8d3ef 100644 --- a/src/transformers/models/xlm/__init__.py +++ b/src/transformers/models/xlm/__init__.py @@ -11,91 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available - - -_import_structure = { - "configuration_xlm": ["XLMConfig", "XLMOnnxConfig"], - "tokenization_xlm": ["XLMTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm"] = [ - "XLMForMultipleChoice", - "XLMForQuestionAnswering", - "XLMForQuestionAnsweringSimple", - "XLMForSequenceClassification", - "XLMForTokenClassification", - "XLMModel", - "XLMPreTrainedModel", - "XLMWithLMHeadModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlm"] = [ - "TFXLMForMultipleChoice", - "TFXLMForQuestionAnsweringSimple", - "TFXLMForSequenceClassification", - "TFXLMForTokenClassification", - "TFXLMMainLayer", - "TFXLMModel", - "TFXLMPreTrainedModel", - "TFXLMWithLMHeadModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xlm import XLMConfig, XLMOnnxConfig - from .tokenization_xlm import XLMTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm import ( - XLMForMultipleChoice, - XLMForQuestionAnswering, - XLMForQuestionAnsweringSimple, - XLMForSequenceClassification, - XLMForTokenClassification, - XLMModel, - XLMPreTrainedModel, - XLMWithLMHeadModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlm import ( - TFXLMForMultipleChoice, - TFXLMForQuestionAnsweringSimple, - TFXLMForSequenceClassification, - TFXLMForTokenClassification, - TFXLMMainLayer, - TFXLMModel, - TFXLMPreTrainedModel, - TFXLMWithLMHeadModel, - ) - + from .configuration_xlm import * + from .modeling_tf_xlm import * + from .modeling_xlm import * + from .tokenization_xlm import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git 
a/src/transformers/models/xlm/configuration_xlm.py b/src/transformers/models/xlm/configuration_xlm.py index 39db12c51b03..ebd2c290f38f 100644 --- a/src/transformers/models/xlm/configuration_xlm.py +++ b/src/transformers/models/xlm/configuration_xlm.py @@ -236,3 +236,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("token_type_ids", dynamic_axis), ] ) + + +__all__ = ["XLMConfig", "XLMOnnxConfig"] diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index f03f416a0848..d7a0d4247472 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -1343,3 +1343,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLMPreTrainedModel", + "TFXLMModel", + "TFXLMWithLMHeadModel", + "TFXLMForSequenceClassification", + "TFXLMForMultipleChoice", + "TFXLMForTokenClassification", + "TFXLMForQuestionAnsweringSimple", + "TFXLMMainLayer", +] diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index 280383630987..250740071278 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -1259,3 +1259,15 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "XLMPreTrainedModel", + "XLMModel", + "XLMWithLMHeadModel", + "XLMForSequenceClassification", + "XLMForQuestionAnsweringSimple", + "XLMForQuestionAnswering", + "XLMForTokenClassification", + "XLMForMultipleChoice", +] diff --git a/src/transformers/models/xlm/tokenization_xlm.py b/src/transformers/models/xlm/tokenization_xlm.py index b20823e01715..14387279708e 100644 --- a/src/transformers/models/xlm/tokenization_xlm.py +++ b/src/transformers/models/xlm/tokenization_xlm.py @@ -601,3 +601,6 @@ def __setstate__(self, d): ) self.sm = sacremoses + + +__all__ = ["XLMTokenizer"] diff --git a/src/transformers/models/xlm_roberta/__init__.py b/src/transformers/models/xlm_roberta/__init__.py index 00658bb9ed9b..1f4731ec9916 100644 --- a/src/transformers/models/xlm_roberta/__init__.py +++ b/src/transformers/models/xlm_roberta/__init__.py @@ -11,168 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
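None of this changes the caller-facing contract: imports that used to resolve against the hand-rolled dicts resolve identically against the derived structure, and importing transformers itself stays cheap. For instance:

import transformers                                 # fast: no model code loads yet

from transformers import XLMRobertaConfig           # first access imports only the config module
from transformers.models.xlm import XLMTokenizer    # the subpackage honors the same lazy contract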
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_xlm_roberta": [ - "XLMRobertaConfig", - "XLMRobertaOnnxConfig", - ], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm_roberta"] = [ - "XLMRobertaForCausalLM", - "XLMRobertaForMaskedLM", - "XLMRobertaForMultipleChoice", - "XLMRobertaForQuestionAnswering", - "XLMRobertaForSequenceClassification", - "XLMRobertaForTokenClassification", - "XLMRobertaModel", - "XLMRobertaPreTrainedModel", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlm_roberta"] = [ - "TFXLMRobertaForCausalLM", - "TFXLMRobertaForMaskedLM", - "TFXLMRobertaForMultipleChoice", - "TFXLMRobertaForQuestionAnswering", - "TFXLMRobertaForSequenceClassification", - "TFXLMRobertaForTokenClassification", - "TFXLMRobertaModel", - "TFXLMRobertaPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_xlm_roberta"] = [ - "FlaxXLMRobertaForMaskedLM", - "FlaxXLMRobertaForCausalLM", - "FlaxXLMRobertaForMultipleChoice", - "FlaxXLMRobertaForQuestionAnswering", - "FlaxXLMRobertaForSequenceClassification", - "FlaxXLMRobertaForTokenClassification", - "FlaxXLMRobertaModel", - "FlaxXLMRobertaPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_xlm_roberta import ( - XLMRobertaConfig, - XLMRobertaOnnxConfig, - ) - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlm_roberta import XLMRobertaTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm_roberta import ( - XLMRobertaForCausalLM, - XLMRobertaForMaskedLM, - XLMRobertaForMultipleChoice, - XLMRobertaForQuestionAnswering, - XLMRobertaForSequenceClassification, - XLMRobertaForTokenClassification, - XLMRobertaModel, - XLMRobertaPreTrainedModel, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlm_roberta import ( - TFXLMRobertaForCausalLM, - TFXLMRobertaForMaskedLM, - TFXLMRobertaForMultipleChoice, - TFXLMRobertaForQuestionAnswering, - TFXLMRobertaForSequenceClassification, - 
TFXLMRobertaForTokenClassification, - TFXLMRobertaModel, - TFXLMRobertaPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_xlm_roberta import ( - FlaxXLMRobertaForCausalLM, - FlaxXLMRobertaForMaskedLM, - FlaxXLMRobertaForMultipleChoice, - FlaxXLMRobertaForQuestionAnswering, - FlaxXLMRobertaForSequenceClassification, - FlaxXLMRobertaForTokenClassification, - FlaxXLMRobertaModel, - FlaxXLMRobertaPreTrainedModel, - ) - + from .configuration_xlm_roberta import * + from .modeling_flax_xlm_roberta import * + from .modeling_tf_xlm_roberta import * + from .modeling_xlm_roberta import * + from .tokenization_xlm_roberta import * + from .tokenization_xlm_roberta_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py index 100321db481f..3b17f33ed600 100644 --- a/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py @@ -152,3 +152,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["XLMRobertaConfig", "XLMRobertaOnnxConfig"] diff --git a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py index e700fcd0244a..b948dce7550f 100644 --- a/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_flax_xlm_roberta.py @@ -1497,3 +1497,14 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs): FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, ) + +__all__ = [ + "FlaxXLMRobertaPreTrainedModel", + "FlaxXLMRobertaModel", + "FlaxXLMRobertaForMaskedLM", + "FlaxXLMRobertaForSequenceClassification", + "FlaxXLMRobertaForMultipleChoice", + "FlaxXLMRobertaForTokenClassification", + "FlaxXLMRobertaForQuestionAnswering", + "FlaxXLMRobertaForCausalLM", +] diff --git a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index efc5b696676c..713654b49835 100644 --- a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -1777,3 +1777,16 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLMRobertaPreTrainedModel", + "TFXLMRobertaModel", + "TFXLMRobertaForMaskedLM", + "TFXLMRobertaForCausalLM", + "TFXLMRobertaForSequenceClassification", + "TFXLMRobertaForMultipleChoice", + "TFXLMRobertaForTokenClassification", + "TFXLMRobertaForQuestionAnswering", + "TFXLMRobertaMainLayer", +] diff --git a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py index 3ac94e75f92f..c2061e30d06d 100644 --- a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py @@ -1716,3 +1716,15 @@ def 
create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "XLMRobertaPreTrainedModel", + "XLMRobertaModel", + "XLMRobertaForCausalLM", + "XLMRobertaForMaskedLM", + "XLMRobertaForSequenceClassification", + "XLMRobertaForMultipleChoice", + "XLMRobertaForTokenClassification", + "XLMRobertaForQuestionAnswering", +] diff --git a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py index 35a06aeb91be..3c33f2cc0c94 100644 --- a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py @@ -22,6 +22,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -31,6 +32,7 @@ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} +@export(backends=("sentencepiece",)) class XLMRobertaTokenizer(PreTrainedTokenizer): """ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on @@ -294,3 +296,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XLMRobertaTokenizer"] diff --git a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py index 4ad2596a6fbf..b51a9340dbfe 100644 --- a/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py @@ -193,3 +193,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XLMRobertaTokenizerFast"] diff --git a/src/transformers/models/xlm_roberta_xl/__init__.py b/src/transformers/models/xlm_roberta_xl/__init__.py index 68ae26b06d6c..c0ec76caad74 100644 --- a/src/transformers/models/xlm_roberta_xl/__init__.py +++ b/src/transformers/models/xlm_roberta_xl/__init__.py @@ -11,60 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
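The @export decorator that appears on XLMRobertaTokenizer above (and on XGLMTokenizer and XLNetTokenizer elsewhere in this patch) is how a symbol now declares the backend it needs, replacing the per-package is_sentencepiece_available() guards. Only the decorator's call shape is taken from the diff; the mechanics below, including the attribute name, are invented for illustration:

def toy_export(backends=()):
    """Hypothetical stand-in for transformers.utils.import_utils.export."""

    def decorator(obj):
        # Tag the object so structure-building code can group it by backend.
        obj.toy_required_backends = tuple(backends)  # attribute name is an assumption
        return obj

    return decorator


@toy_export(backends=("sentencepiece",))
class ToyTokenizer:
    pass


assert ToyTokenizer.toy_required_backends == ("sentencepiece",)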
- from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available - - -_import_structure = { - "configuration_xlm_roberta_xl": [ - "XLMRobertaXLConfig", - "XLMRobertaXLOnnxConfig", - ], -} +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlm_roberta_xl"] = [ - "XLMRobertaXLForCausalLM", - "XLMRobertaXLForMaskedLM", - "XLMRobertaXLForMultipleChoice", - "XLMRobertaXLForQuestionAnswering", - "XLMRobertaXLForSequenceClassification", - "XLMRobertaXLForTokenClassification", - "XLMRobertaXLModel", - "XLMRobertaXLPreTrainedModel", - ] if TYPE_CHECKING: - from .configuration_xlm_roberta_xl import ( - XLMRobertaXLConfig, - XLMRobertaXLOnnxConfig, - ) - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlm_roberta_xl import ( - XLMRobertaXLForCausalLM, - XLMRobertaXLForMaskedLM, - XLMRobertaXLForMultipleChoice, - XLMRobertaXLForQuestionAnswering, - XLMRobertaXLForSequenceClassification, - XLMRobertaXLForTokenClassification, - XLMRobertaXLModel, - XLMRobertaXLPreTrainedModel, - ) - + from .configuration_xlm_roberta_xl import * + from .modeling_xlm_roberta_xl import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py index 6ee323ae76ca..dce18640a5b7 100644 --- a/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py @@ -148,3 +148,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("attention_mask", dynamic_axis), ] ) + + +__all__ = ["XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig"] diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index f66a32291794..18ebc8ce69b5 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -1672,3 +1672,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx + + +__all__ = [ + "XLMRobertaXLPreTrainedModel", + "XLMRobertaXLModel", + "XLMRobertaXLForCausalLM", + "XLMRobertaXLForMaskedLM", + "XLMRobertaXLForSequenceClassification", + "XLMRobertaXLForMultipleChoice", + "XLMRobertaXLForTokenClassification", + "XLMRobertaXLForQuestionAnswering", +] diff --git a/src/transformers/models/xlnet/__init__.py b/src/transformers/models/xlnet/__init__.py index f50d4cc178d3..8bfd5a494624 100644 --- a/src/transformers/models/xlnet/__init__.py +++ b/src/transformers/models/xlnet/__init__.py @@ -11,128 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import TYPE_CHECKING -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = {"configuration_xlnet": ["XLNetConfig"]} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_xlnet"] = [ - "XLNetForMultipleChoice", - "XLNetForQuestionAnswering", - "XLNetForQuestionAnsweringSimple", - "XLNetForSequenceClassification", - "XLNetForTokenClassification", - "XLNetLMHeadModel", - "XLNetModel", - "XLNetPreTrainedModel", - "load_tf_weights_in_xlnet", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_xlnet"] = [ - "TFXLNetForMultipleChoice", - "TFXLNetForQuestionAnsweringSimple", - "TFXLNetForSequenceClassification", - "TFXLNetForTokenClassification", - "TFXLNetLMHeadModel", - "TFXLNetMainLayer", - "TFXLNetModel", - "TFXLNetPreTrainedModel", - ] +from ...utils import _LazyModule +from ...utils.import_utils import define_import_structure if TYPE_CHECKING: - from .configuration_xlnet import XLNetConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlnet import XLNetTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_xlnet_fast import XLNetTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_xlnet import ( - XLNetForMultipleChoice, - XLNetForQuestionAnswering, - XLNetForQuestionAnsweringSimple, - XLNetForSequenceClassification, - XLNetForTokenClassification, - XLNetLMHeadModel, - XLNetModel, - XLNetPreTrainedModel, - load_tf_weights_in_xlnet, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_xlnet import ( - TFXLNetForMultipleChoice, - TFXLNetForQuestionAnsweringSimple, - TFXLNetForSequenceClassification, - TFXLNetForTokenClassification, - TFXLNetLMHeadModel, - TFXLNetMainLayer, - TFXLNetModel, - TFXLNetPreTrainedModel, - ) - + from .configuration_xlnet import * + from .modeling_tf_xlnet import * + from .modeling_xlnet import * + from .tokenization_xlnet import * + from .tokenization_xlnet_fast import * else: import sys - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + _file = globals()["__file__"] + sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) diff --git a/src/transformers/models/xlnet/configuration_xlnet.py b/src/transformers/models/xlnet/configuration_xlnet.py index 0a35b204f4b3..4a7238eb4c09 100644 --- 
a/src/transformers/models/xlnet/configuration_xlnet.py +++ b/src/transformers/models/xlnet/configuration_xlnet.py @@ -235,3 +235,6 @@ def max_position_embeddings(self, value): raise NotImplementedError( f"The model {self.model_type} is one of the few models that has no sequence length limit." ) + + +__all__ = ["XLNetConfig"] diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index eeacebae0448..a506f08e16c2 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -1807,3 +1807,15 @@ def build(self, input_shape=None): if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) + + +__all__ = [ + "TFXLNetPreTrainedModel", + "TFXLNetModel", + "TFXLNetLMHeadModel", + "TFXLNetForSequenceClassification", + "TFXLNetForMultipleChoice", + "TFXLNetForTokenClassification", + "TFXLNetForQuestionAnsweringSimple", + "TFXLNetMainLayer", +] diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index 5d424ebe12dd..2afaee173ea3 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -2079,3 +2079,16 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +__all__ = [ + "load_tf_weights_in_xlnet", + "XLNetPreTrainedModel", + "XLNetModel", + "XLNetLMHeadModel", + "XLNetForSequenceClassification", + "XLNetForTokenClassification", + "XLNetForMultipleChoice", + "XLNetForQuestionAnsweringSimple", + "XLNetForQuestionAnswering", +] diff --git a/src/transformers/models/xlnet/tokenization_xlnet.py b/src/transformers/models/xlnet/tokenization_xlnet.py index 9d4b35775efb..ab88c7ee368e 100644 --- a/src/transformers/models/xlnet/tokenization_xlnet.py +++ b/src/transformers/models/xlnet/tokenization_xlnet.py @@ -23,6 +23,7 @@ from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging +from ...utils.import_utils import export logger = logging.get_logger(__name__) @@ -38,6 +39,7 @@ SEG_ID_PAD = 4 +@export(backends=("sentencepiece",)) class XLNetTokenizer(PreTrainedTokenizer): """ Construct an XLNet tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). @@ -380,3 +382,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = fi.write(content_spiece_model) return (out_vocab_file,) + + +__all__ = ["XLNetTokenizer"] diff --git a/src/transformers/models/xlnet/tokenization_xlnet_fast.py b/src/transformers/models/xlnet/tokenization_xlnet_fast.py index a506e8c45a3c..0a6ae907ef57 100644 --- a/src/transformers/models/xlnet/tokenization_xlnet_fast.py +++ b/src/transformers/models/xlnet/tokenization_xlnet_fast.py @@ -229,3 +229,6 @@ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) + + +__all__ = ["XLNetTokenizerFast"] diff --git a/src/transformers/models/xmod/__init__.py b/src/transformers/models/xmod/__init__.py index 9b9cb36e3b93..71fd959a9257 100644 --- a/src/transformers/models/xmod/__init__.py +++ b/src/transformers/models/xmod/__init__.py @@ -1,71 +1,17 @@ # flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. 
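With this many __all__ lists added by hand, a cheap sanity probe is worth keeping nearby. The sketch below is not part of the patch; it simply verifies that every exported name really exists once its module is imported:

import importlib


def check_all_is_honest(module_name):
    """Assert that every name in a module's __all__ is actually defined."""
    module = importlib.import_module(module_name)
    missing = [n for n in getattr(module, "__all__", []) if not hasattr(module, n)]
    assert not missing, f"{module_name} lists undefined names: {missing}"


check_all_is_honest("transformers.models.xmod.configuration_xmod")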
-
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
-_import_structure = {
-    "configuration_xmod": [
-        "XmodConfig",
-        "XmodOnnxConfig",
-    ],
-}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_xmod"] = [
-        "XmodForCausalLM",
-        "XmodForMaskedLM",
-        "XmodForMultipleChoice",
-        "XmodForQuestionAnswering",
-        "XmodForSequenceClassification",
-        "XmodForTokenClassification",
-        "XmodModel",
-        "XmodPreTrainedModel",
-    ]
-
 if TYPE_CHECKING:
-    from .configuration_xmod import XmodConfig, XmodOnnxConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_xmod import (
-            XmodForCausalLM,
-            XmodForMaskedLM,
-            XmodForMultipleChoice,
-            XmodForQuestionAnswering,
-            XmodForSequenceClassification,
-            XmodForTokenClassification,
-            XmodModel,
-            XmodPreTrainedModel,
-        )
-
+    from .configuration_xmod import *
+    from .modeling_xmod import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/xmod/configuration_xmod.py b/src/transformers/models/xmod/configuration_xmod.py
index 34261a0d7cdf..68ad14307f05 100644
--- a/src/transformers/models/xmod/configuration_xmod.py
+++ b/src/transformers/models/xmod/configuration_xmod.py
@@ -180,3 +180,6 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]:
                 ("attention_mask", dynamic_axis),
             ]
         )
+
+
+__all__ = ["XmodConfig", "XmodOnnxConfig"]
diff --git a/src/transformers/models/xmod/modeling_xmod.py b/src/transformers/models/xmod/modeling_xmod.py
index b1ca8116a72a..5bbb3f608247 100644
--- a/src/transformers/models/xmod/modeling_xmod.py
+++ b/src/transformers/models/xmod/modeling_xmod.py
@@ -1640,3 +1640,15 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l
     mask = input_ids.ne(padding_idx).int()
     incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
     return incremental_indices.long() + padding_idx
+
+
+__all__ = [
+    "XmodPreTrainedModel",
+    "XmodModel",
+    "XmodForCausalLM",
+    "XmodForMaskedLM",
+    "XmodForSequenceClassification",
+    "XmodForMultipleChoice",
+    "XmodForTokenClassification",
+    "XmodForQuestionAnswering",
+]
diff --git a/src/transformers/models/yolos/__init__.py b/src/transformers/models/yolos/__init__.py
index fdf7c5db1cb2..ff39381c277a 100644
--- a/src/transformers/models/yolos/__init__.py
+++ b/src/transformers/models/yolos/__init__.py
@@ -13,59 +13,17 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
-
-
-_import_structure = {"configuration_yolos": ["YolosConfig", "YolosOnnxConfig"]}
-
-try:
-    if not is_vision_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
-    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_yolos"] = [
-        "YolosForObjectDetection",
-        "YolosModel",
-        "YolosPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_yolos import YolosConfig, YolosOnnxConfig
-
-    try:
-        if not is_vision_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .feature_extraction_yolos import YolosFeatureExtractor
-        from .image_processing_yolos import YolosImageProcessor
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_yolos import (
-            YolosForObjectDetection,
-            YolosModel,
-            YolosPreTrainedModel,
-        )
-
-
+    from .configuration_yolos import *
+    from .feature_extraction_yolos import *
+    from .image_processing_yolos import *
+    from .modeling_yolos import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/yolos/configuration_yolos.py b/src/transformers/models/yolos/configuration_yolos.py
index d6fe04f42660..a488216cf8da 100644
--- a/src/transformers/models/yolos/configuration_yolos.py
+++ b/src/transformers/models/yolos/configuration_yolos.py
@@ -173,3 +173,6 @@ def atol_for_validation(self) -> float:
     @property
     def default_onnx_opset(self) -> int:
         return 12
+
+
+__all__ = ["YolosConfig", "YolosOnnxConfig"]
diff --git a/src/transformers/models/yolos/feature_extraction_yolos.py b/src/transformers/models/yolos/feature_extraction_yolos.py
index 5696ee65bbdf..b5163aad8c1a 100644
--- a/src/transformers/models/yolos/feature_extraction_yolos.py
+++ b/src/transformers/models/yolos/feature_extraction_yolos.py
@@ -18,6 +18,7 @@
 
 from ...image_transforms import rgb_to_id as _rgb_to_id
 from ...utils import logging
+from ...utils.import_utils import export
 from .image_processing_yolos import YolosImageProcessor
 
 
@@ -33,6 +34,7 @@ def rgb_to_id(x):
     return _rgb_to_id(x)
 
 
+@export(backends=("vision",))
 class YolosFeatureExtractor(YolosImageProcessor):
     def __init__(self, *args, **kwargs) -> None:
         warnings.warn(
@@ -41,3 +43,6 @@ def __init__(self, *args, **kwargs) -> None:
             FutureWarning,
         )
         super().__init__(*args, **kwargs)
+
+
+__all__ = ["YolosFeatureExtractor"]
diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py
index 19b21333f609..60145935711d 100644
--- a/src/transformers/models/yolos/image_processing_yolos.py
+++ b/src/transformers/models/yolos/image_processing_yolos.py
@@ -62,6 +62,7 @@
     is_vision_available,
     logging,
 )
+from ...utils.import_utils import export
 
 
 if is_torch_available():
@@ -720,6 +721,7 @@ def compute_segments(
     return segmentation, segments
 
 
+@export(backends=("vision",))
 class YolosImageProcessor(BaseImageProcessor):
     r"""
     Constructs a Detr image processor.
@@ -1530,3 +1532,6 @@ def post_process_object_detection(
         results.append({"scores": score, "labels": label, "boxes": box})
 
     return results
+
+
+__all__ = ["YolosImageProcessor"]
diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py
index 9b97d39b4a03..c39670fc5155 100755
--- a/src/transformers/models/yolos/modeling_yolos.py
+++ b/src/transformers/models/yolos/modeling_yolos.py
@@ -1360,3 +1360,6 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
     else:
         raise ValueError("Only 3-dimensional tensors are supported")
     return NestedTensor(tensor, mask)
+
+
+__all__ = ["YolosPreTrainedModel", "YolosModel", "YolosForObjectDetection"]
diff --git a/src/transformers/models/yoso/__init__.py b/src/transformers/models/yoso/__init__.py
index c4c73385017e..74871d9f488a 100644
--- a/src/transformers/models/yoso/__init__.py
+++ b/src/transformers/models/yoso/__init__.py
@@ -13,51 +13,15 @@
 # limitations under the License.
 from typing import TYPE_CHECKING
 
-from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
-
-
-_import_structure = {"configuration_yoso": ["YosoConfig"]}
-
-try:
-    if not is_torch_available():
-        raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
-    pass
-else:
-    _import_structure["modeling_yoso"] = [
-        "YosoForMaskedLM",
-        "YosoForMultipleChoice",
-        "YosoForQuestionAnswering",
-        "YosoForSequenceClassification",
-        "YosoForTokenClassification",
-        "YosoLayer",
-        "YosoModel",
-        "YosoPreTrainedModel",
-    ]
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
 
 
 if TYPE_CHECKING:
-    from .configuration_yoso import YosoConfig
-
-    try:
-        if not is_torch_available():
-            raise OptionalDependencyNotAvailable()
-    except OptionalDependencyNotAvailable:
-        pass
-    else:
-        from .modeling_yoso import (
-            YosoForMaskedLM,
-            YosoForMultipleChoice,
-            YosoForQuestionAnswering,
-            YosoForSequenceClassification,
-            YosoForTokenClassification,
-            YosoLayer,
-            YosoModel,
-            YosoPreTrainedModel,
-        )
-
-
+    from .configuration_yoso import *
+    from .modeling_yoso import *
 else:
     import sys
 
-    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/src/transformers/models/yoso/configuration_yoso.py b/src/transformers/models/yoso/configuration_yoso.py
index e353744cc7a6..9a7fb1218e40 100644
--- a/src/transformers/models/yoso/configuration_yoso.py
+++ b/src/transformers/models/yoso/configuration_yoso.py
@@ -139,3 +139,6 @@ def __init__(
        self.conv_window = conv_window
        self.use_fast_hash = use_fast_hash
        self.lsh_backward = lsh_backward
+
+
+__all__ = ["YosoConfig"]
diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py
index a94c6de542ee..2903170c8d28 100644
--- a/src/transformers/models/yoso/modeling_yoso.py
+++ b/src/transformers/models/yoso/modeling_yoso.py
@@ -1306,3 +1306,14 @@ def forward(
             hidden_states=outputs.hidden_states,
             attentions=outputs.attentions,
         )
+
+
+__all__ = [
+    "YosoPreTrainedModel",
+    "YosoModel",
+    "YosoForMaskedLM",
+    "YosoForSequenceClassification",
+    "YosoForMultipleChoice",
+    "YosoForTokenClassification",
+    "YosoForQuestionAnswering",
+]