From 84bb846241d985f7f5ee03a43b50d6573a46b3fb Mon Sep 17 00:00:00 2001
From: ravi-mosaicml <87037432+ravi-mosaicml@users.noreply.github.com>
Date: Mon, 27 Dec 2021 13:34:20 -0800
Subject: [PATCH] A yapf update broke some formatting...re-running the linter (#188)

---
 composer/algorithms/alibi/alibi.py | 2 +-
 composer/datasets/hparams.py       | 2 +-
 composer/optim/pytorch_future.py   | 5 +++--
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/composer/algorithms/alibi/alibi.py b/composer/algorithms/alibi/alibi.py
index 9c81456e4e2..209b7c199c9 100644
--- a/composer/algorithms/alibi/alibi.py
+++ b/composer/algorithms/alibi/alibi.py
@@ -90,7 +90,7 @@ def apply_alibi(model: torch.nn.Module, heads_per_layer: int, max_sequence_lengt
     zero_and_freeze_expand_position_embeddings(model=model,
                                                attribute=position_embedding_attribute,
                                                new_embedding_length=max_sequence_length)
-    log.info(f" Position embedding expanded to sequence " f"length {max_sequence_length}, zeroed, and frozen")
+    log.info(f" Position embedding expanded to sequence length {max_sequence_length}, zeroed, and frozen")
 
     def convert_attention(module: torch.nn.Module, module_index: int = None):
         module = register_alibi(module=module, n_heads=heads_per_layer, max_token_length=max_sequence_length)
diff --git a/composer/datasets/hparams.py b/composer/datasets/hparams.py
index 2bc0fafde60..b14fe60bcb7 100644
--- a/composer/datasets/hparams.py
+++ b/composer/datasets/hparams.py
@@ -71,7 +71,7 @@ class SyntheticHparamsMixin(hp.Hparams, abc.ABC):
             Ignored if :attr:`use_synthetic` is False. (Default: ``CONTIGUOUS_FORMAT``)
     """
 
-    use_synthetic: bool = hp.optional("Whether to use synthetic data. Defaults to False." "", default=False)
+    use_synthetic: bool = hp.optional("Whether to use synthetic data. Defaults to False.", default=False)
     synthetic_num_unique_samples: int = hp.optional("The number of unique samples to allocate memory for.", default=100)
     synthetic_device: str = hp.optional("Device to store the sample pool. Should be `cuda` or `cpu`. Defauls to `cpu`.",
                                         default="cpu")
diff --git a/composer/optim/pytorch_future.py b/composer/optim/pytorch_future.py
index 32e69a3bde8..3dd56e08924 100644
--- a/composer/optim/pytorch_future.py
+++ b/composer/optim/pytorch_future.py
@@ -69,7 +69,7 @@ def __init__(self,
                  verbose=False,
                  interval='step'):
         if warmup_method not in ("constant", "linear"):
-            raise ValueError("Only 'constant' or 'linear' warmup_method accepted, but " "got {}".format(warmup_method))
+            raise ValueError("Only 'constant' or 'linear' warmup_method accepted, but got {}".format(warmup_method))
         self.warmup_factor = warmup_factor
         self.warmup_iters = warmup_iters
         self.warmup_method = warmup_method
@@ -84,7 +84,8 @@ def get_lr(self):
         """
 
         if not self._get_lr_called_within_step:
-            warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.")
+            warnings.warn("To get the last learning rate computed by the scheduler, "
+                          "please use `get_last_lr()`.")
 
         if self.last_epoch == 0:
             return [group['lr'] * self.warmup_factor for group in self.optimizer.param_groups]