
fix: Replaced deprecated logger.warn with logger.warning #30197

Merged (2 commits) on Apr 12, 2024
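
Note: `Logger.warn` is a deprecated alias for `Logger.warning` in Python's standard `logging` module, and the `transformers` logger is a standard `Logger` underneath, so every `logger.warn` call touched below triggers a `DeprecationWarning` on recent Python versions. A minimal sketch demonstrating the difference, independent of `transformers`:

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")      # deprecated alias of warning()
    logger.warning("new spelling")   # the supported method

# Only the warn() call is flagged as deprecated.
assert sum(issubclass(w.category, DeprecationWarning) for w in caught) == 1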
4 changes: 2 additions & 2 deletions src/transformers/modeling_utils.py
@@ -2225,7 +2225,7 @@ def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
             self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
         else:
             self.apply(partial(self._set_gradient_checkpointing, value=True))
-            logger.warn(
+            logger.warning(
                 "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
                 "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
             )
@@ -2273,7 +2273,7 @@ def gradient_checkpointing_disable(self):
         if not _is_using_old_format:
             self._set_gradient_checkpointing(enable=False)
         else:
-            logger.warn(
+            logger.warning(
                 "You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it)."
                 "Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model."
             )
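Both hunks above sit in the deprecated gradient-checkpointing fallback. For context, a hedged sketch of the legacy pattern the warning refers to; only the method name and the `value` keyword come from the diff, the class and body are illustrative:

from transformers import PreTrainedModel

class OldStyleModel(PreTrainedModel):
    # Legacy hook: defining this method (consumed above via
    # `self.apply(partial(self._set_gradient_checkpointing, value=True))`)
    # routes the model through the deprecated branch.
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

# New format: remove `_set_gradient_checkpointing` entirely and let
# PreTrainedModel handle enabling/disabling (and `gradient_checkpointing_kwargs`).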
@@ -130,14 +130,14 @@ def _convert_id_to_token(self, index):
 
     # Override since phonemes cannot be converted back to strings
     def decode(self, token_ids, **kwargs):
-        logger.warn(
+        logger.warning(
             "Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead."
         )
         return self.convert_ids_to_tokens(token_ids)
 
     # Override since phonemes cannot be converted back to strings
     def convert_tokens_to_string(self, tokens, **kwargs):
-        logger.warn(
+        logger.warning(
             "Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens."
         )
         return tokens
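For context on the two overrides: `decode` here deliberately returns phoneme tokens instead of text, since the phoneme-to-text mapping is one-to-many. A toy stand-in showing the same pattern (the class name and vocabulary are made up, not the tokenizer from this PR):

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

class PhonemeTokenizerSketch:
    vocab = {0: "HH", 1: "AH", 2: "L", 3: "OW"}  # made-up id-to-phoneme table

    def convert_ids_to_tokens(self, token_ids):
        return [self.vocab[i] for i in token_ids]

    def decode(self, token_ids, **kwargs):
        logger.warning(
            "Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead."
        )
        return self.convert_ids_to_tokens(token_ids)

print(PhonemeTokenizerSketch().decode([0, 1, 2, 3]))  # ['HH', 'AH', 'L', 'OW']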
10 changes: 6 additions & 4 deletions src/transformers/models/whisper/generation_whisper.py
@@ -985,16 +985,18 @@ def _maybe_warn_unused_inputs(
             "{}, but will be ignored."
         )
         if condition_on_prev_tokens is not None:
-            logger.warn(warning_prefix.format(f"condition_on_prev_tokens is set to {condition_on_prev_tokens}"))
+            logger.warning(warning_prefix.format(f"condition_on_prev_tokens is set to {condition_on_prev_tokens}"))
 
         if compression_ratio_threshold is not None:
-            logger.warn(warning_prefix.format(f"compression_ratio_threshold is set to {compression_ratio_threshold}"))
+            logger.warning(
+                warning_prefix.format(f"compression_ratio_threshold is set to {compression_ratio_threshold}")
+            )
 
         if logprob_threshold is not None:
-            logger.warn(warning_prefix.format(f"logprob_threshold is set to {logprob_threshold}"))
+            logger.warning(warning_prefix.format(f"logprob_threshold is set to {logprob_threshold}"))
 
         if no_speech_threshold is not None:
-            logger.warn(warning_prefix.format(f"no_speech_threshold is set to {no_speech_threshold}"))
+            logger.warning(warning_prefix.format(f"no_speech_threshold is set to {no_speech_threshold}"))
 
         # when passing temperature as a list it cannot just be ignored => throw error in this case
         if isinstance(temperature, (list, tuple)):
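The `warning_prefix` used throughout this hunk is a shared format template; only its tail, "{}, but will be ignored.", is visible at the top of the hunk, and each unused input is formatted into it. A self-contained sketch of the pattern (the full prefix wording is not shown in the diff, so the text below is a placeholder):

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("demo")

# Placeholder prefix; only the "{}, but will be ignored." tail appears in the hunk.
warning_prefix = "Short-form transcription is activated; {}, but will be ignored."

def maybe_warn_unused_inputs(logprob_threshold=None, no_speech_threshold=None):
    if logprob_threshold is not None:
        logger.warning(warning_prefix.format(f"logprob_threshold is set to {logprob_threshold}"))
    if no_speech_threshold is not None:
        logger.warning(warning_prefix.format(f"no_speech_threshold is set to {no_speech_threshold}"))

maybe_warn_unused_inputs(logprob_threshold=-1.0)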
2 changes: 1 addition & 1 deletion src/transformers/quantizers/quantizer_aqlm.py
@@ -88,7 +88,7 @@ def is_trainable(self, model: Optional["PreTrainedModel"] = None):
         if aqlm_supports_training:
             return True
         else:
-            logger.warn(
+            logger.warning(
                 f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`"
             )
             return False
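For context, `is_trainable` gates on the installed `aqlm` version. A hedged sketch of how such a gate can be implemented with `importlib.metadata` and `packaging` (the `1.0.2` floor comes from the warning text above; the helper name is hypothetical, not the PR's code):

import importlib.metadata

from packaging import version

MIN_TRAINABLE_AQLM = version.parse("1.0.2")  # floor taken from the warning text

def aqlm_supports_training() -> bool:
    # Hypothetical helper: True if the installed aqlm is new enough to train.
    try:
        installed = version.parse(importlib.metadata.version("aqlm"))
    except importlib.metadata.PackageNotFoundError:
        return False
    return installed >= MIN_TRAINABLE_AQLM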