
Configured pydocstyle for the trainer, callbacks, loggers, optimizers, and utils. (#1089)

This PR enables the `pydocstyle` pre-commit hook for most of the codebase and brings docstrings up to compliance so that the check passes.

* The main changes involved ensuring that docstring summaries fit on a single line, that they end with a period, and that no blank line separates the end of the docstring from the start of the function body (see the sketch below).
* Added docstring entries for arguments that were previously undocumented.
* (Coming along for the ride): Removed calls to `trainer.engine.close`, since `close()` is now invoked automatically in `__del__` as part of #948.
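As a rough illustration of these conventions, here is a minimal, hypothetical sketch. It is loosely modeled on `blur_2d` from this diff but is not the committed implementation, and the body is a placeholder; the `r` prefix mirrors another change in this diff, where docstrings containing backslashes were converted to raw strings:

```python
# Hypothetical sketch of the docstring style the pydocstyle hook enforces;
# not code from this commit. The relevant checks include D400 (summary line
# ends with a period), D205 (blank line between summary and description),
# and D202 (no blank line between the docstring and the function body).
import torch


def blur_2d(input: torch.Tensor, stride: int = 1) -> torch.Tensor:
    r"""Applies a spatial low-pass filter.

    The docstring is a raw string so that Sphinx markup such as
    :math:`intensity \times 0.18 + .1` keeps its backslash without
    double-escaping, mirroring the raw-string (``r"""``) changes below.

    Args:
        input (torch.Tensor): A 4d tensor of shape NCHW.
        stride (int): Stride of the filter. Default: ``1``.

    Returns:
        torch.Tensor: The strided tensor (placeholder behavior).
    """
    return input[:, :, ::stride, ::stride]  # body follows docstring with no blank line
```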
ravi-mosaicml authored Jun 2, 2022
1 parent 17f24b4 commit 0685f35
Showing 100 changed files with 491 additions and 464 deletions.
4 changes: 2 additions & 2 deletions .ci/test_lint_doctests.py
@@ -58,7 +58,7 @@ def test_run_doctests():

@pytest.mark.timeout(30)
def test_docker_build_matrix():
"""Test that the docker build matrix is up to date"""
"""Test that the docker build matrix is up to date."""
docker_folder = pathlib.Path(os.path.dirname(__file__)) / '..' / 'docker'

# Capture the existing readme and build matrix contents
@@ -86,7 +86,7 @@ def test_docker_build_matrix():

@pytest.mark.parametrize("example", [1, 2])
def test_release_tests_reflect_readme(example: int):
"""Test that example_1.py and example_2.py in release_tests reflect the README.md"""
"""Test that example_1.py and example_2.py in release_tests reflect the README.md."""
with open(pathlib.Path(os.path.dirname(__file__)) / '..' / 'README.md', 'r') as f:
readme_lines = f.readlines()
example_code_lines = []
25 changes: 12 additions & 13 deletions .pre-commit-config.yaml
@@ -36,16 +36,17 @@ repos:
pass_filenames: false
args: [--warnings]
additional_dependencies: ["pyright@1.1.247"]
# - repo: https://github.com/PyCQA/pydocstyle
# hooks:
# - id: pydocstyle
# name: pydocstyle
# entry: pydocstyle
# language: python
# types: [python]
# additional_dependencies:
# - "toml"
# rev: 6.1.1
- repo: https://github.com/PyCQA/pydocstyle
hooks:
- id: pydocstyle
name: pydocstyle
entry: pydocstyle
language: python
types: [python]
exclude: '(?:tests|.ci|composer\/algorithms|composer\/datasets|composer\/models)\/.*'
additional_dependencies:
- "toml"
rev: 6.1.1
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.1.0
hooks:
@@ -72,13 +73,11 @@ repos:
- id: check-yaml
- id: debug-statements
- id: destroyed-symlinks
# - id: double-quote-string-fixer # TODO(ravi): Enable this check later. Generates a large diff.
# - id: double-quote-string-fixer # TODO(ravi): Enable this check later. Generates a large diff.
- id: end-of-file-fixer
- id: fix-byte-order-marker
- id: mixed-line-ending
- id: trailing-whitespace
# - id: name-tests-test # TODO(ravi): Enable this check later. Generates a large diff.
# args: ['--django']
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.1.13
hooks:
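With the hook enabled (note the `exclude` pattern above, which still skips `tests`, `.ci`, and the `composer/algorithms`, `composer/datasets`, and `composer/models` subpackages), contributors can run it via `pre-commit run pydocstyle --all-files`. A rough sketch of the equivalent programmatic check, assuming pydocstyle 6.x's documented top-level `check` helper (treat the exact signature as an assumption):

```python
# Rough sketch, assuming pydocstyle 6.x exposes a public `check` helper.
import pydocstyle

# `check` yields one violation per problem, printable as e.g.
# "...: D400: First line should end with a period".
for error in pydocstyle.check(["composer/callbacks/lr_monitor.py"]):
    print(error)
```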
2 changes: 2 additions & 0 deletions composer/__init__.py
@@ -1,6 +1,8 @@
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0

"""Composer."""

from composer import algorithms as algorithms
from composer import callbacks as callbacks
from composer import datasets as datasets
20 changes: 11 additions & 9 deletions composer/algorithms/augmix/augmix.py
@@ -32,13 +32,13 @@ def augmix_image(img: ImgT,
width: int = 3,
alpha: float = 1.0,
augmentation_set: List = augmentation_sets["all"]) -> ImgT:
"""Applies AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) data
augmentation to a single image or batch of images. See
:class:`.AugMix` and the
:doc:`Method Card </method_cards/augmix>` for details. This function only acts on a
single image (or batch) per call and is unlikely to be used in a training loop. Use
:class:`~composer.algorithms.augmix.augmix.AugmentAndMixTransform` to use AugMix as
part of a :class:`torchvision.datasets.VisionDataset`\\'s ``transform``.
r"""Applies the AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) data augmentation.
This function works on a single image or batch of images. See :class:`.AugMix` and
the :doc:`Method Card </method_cards/augmix>` for details. This function only acts on a
single image (or batch) per call and is unlikely to be used in a training loop.
Use :class:`~composer.algorithms.augmix.augmix.AugmentAndMixTransform` to use AugMix as
part of a :class:`torchvision.datasets.VisionDataset`\'s ``transform``.
Example:
.. testcode::
@@ -166,7 +166,7 @@ def forward(self, img: PillowImage) -> PillowImage:


class AugMix(Algorithm):
"""AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) creates ``width`` sequences of ``depth``
r"""The AugMix data augmentation technique.
AugMix (`Hendrycks et al, 2020 <http://arxiv.org/abs/1912.02781>`_) creates ``width`` sequences of ``depth``
image augmentations, applies each sequence with random intensity, and returns a convex combination of the ``width``
augmented images and the original image. The coefficients for mixing the augmented images are drawn from a uniform
``Dirichlet(alpha, alpha, ...)`` distribution. The coefficient for mixing the combined augmented image and the
@@ -224,7 +226,7 @@ class AugMix(Algorithm):
``"color"``, ``"contrast"``, ``"sharpness"``, and ``"brightness"``. The
original implementations have an intensity sampling scheme that samples a
value bounded by 0.118 at a minimum, and a maximum value of
:math:`intensity \\times 0.18 + .1`, which ranges from 0.28 (intensity = 1)
:math:`intensity \times 0.18 + .1`, which ranges from 0.28 (intensity = 1)
to 1.9 (intensity 10). These augmentations have different effects
depending on whether they are < 0 or > 0 (or < 1 or > 1).
"all" uses implementations of "color", "contrast",
2 changes: 1 addition & 1 deletion composer/algorithms/blurpool/blurpool_layers.py
@@ -31,7 +31,7 @@ def _padding_for_filt_2d_same(filt: torch.Tensor):


def blur_2d(input: torch.Tensor, stride: _size_2_t = 1, filter: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Apply a spatial low-pass filter.
"""Applies a spatial low-pass filter.
Args:
input (:class:`torch.Tensor`): a 4d tensor of shape NCHW
2 changes: 1 addition & 1 deletion composer/algorithms/cutout/cutout.py
@@ -129,7 +129,7 @@ def match(self, event: Event, state: State) -> bool:
return event == Event.AFTER_DATALOADER

def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Apply cutout on input images."""
"""Applies cutout on input images."""
x = state.batch_get_item(self.input_key)
assert isinstance(x, Tensor), "Multiple tensors not supported for Cutout."

2 changes: 1 addition & 1 deletion composer/algorithms/progressive_resizing/__init__.py
@@ -1,7 +1,7 @@
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0

"""Apply Fastai's `progressive resizing <https://github.com/fastai/fastbook/blob/780b76bef3127ce5b64f8230fce60e915a
"""Applies Fastai's `progressive resizing <https://github.com/fastai/fastbook/blob/780b76bef3127ce5b64f8230fce60e915a
7e0735/07_sizing_and_tta.ipynb>`__ data augmentation to speed up training.
Progressive resizing initially reduces input resolution to speed up early training. Throughout training, the
12 changes: 9 additions & 3 deletions composer/algorithms/progressive_resizing/progressive_resizing.py
@@ -112,7 +112,9 @@ def resize_batch(input: torch.Tensor,


class ProgressiveResizing(Algorithm):
"""Apply Fastai's `progressive resizing <https://\\
r"""Resize inputs and optionally outputs by cropping or interpolating.
Apply Fastai's `progressive resizing <https://\
github.com/fastai/fastbook/blob/780b76bef3127ce5b64f8230fce60e915a7e0735/07_sizing_and_tta.ipynb>`__ data
augmentation to speed up training.
@@ -204,6 +206,7 @@ def match(self, event: Event, state: State) -> bool:
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now
"""
@@ -271,8 +274,11 @@ def _make_crop(tensor: torch.Tensor, scale_factor: float) -> T_ResizeTransform:

def _make_crop_pair(X: torch.Tensor, y: torch.Tensor,
scale_factor: float) -> Tuple[T_ResizeTransform, T_ResizeTransform]:
"""Makes a pair of random crops for an input image X and target tensor y such that the same region is selected from
both."""
"""Makes a pair of random crops.
Crops input image X and target tensor y such that the same region is selected from
both.
"""
# New height and width for X
HcX = int(scale_factor * X.shape[2])
WcX = int(scale_factor * X.shape[3])
2 changes: 1 addition & 1 deletion composer/algorithms/squeeze_excite/squeeze_excite.py
@@ -162,7 +162,7 @@ def match(self, event: Event, state: State) -> bool:
return event == Event.INIT

def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Apply the Squeeze-and-Excitation layer replacement.
"""Applies the Squeeze-and-Excitation layer replacement.
Args:
event (Event): the current event
1 change: 0 additions & 1 deletion composer/algorithms/stochastic_depth/stochastic_depth.py
@@ -191,7 +191,6 @@ def __init__(self,
@property
def find_unused_parameters(self) -> bool:
"""DDP parameter to notify that parameters may not have gradients if it is dropped during the forward pass."""

return (self.stochastic_method == "block")

def match(self, event: Event, state: State) -> bool:
2 changes: 1 addition & 1 deletion composer/algorithms/swa/swa.py
@@ -26,7 +26,7 @@ def _assert_valid_duration(time: Time):


class SWA(Algorithm):
"""Apply Stochastic Weight Averaging (`Izmailov et al, 2018 <https://arxiv.org/abs/1803.05407>`_)
"""Applies Stochastic Weight Averaging (`Izmailov et al, 2018 <https://arxiv.org/abs/1803.05407>`_)
Stochastic Weight Averaging (SWA) averages model weights sampled at
different times near the end of training. This leads to better
13 changes: 9 additions & 4 deletions composer/algorithms/utils/augmentation_primitives.py
@@ -76,6 +76,7 @@ def _float_parameter(level: float, maxval: float):
level (float): Level of the operation that will be between [0, 10].
maxval (float): Maximum value that the operation can have. This will be scaled to
level/10.
Returns:
float: The result from scaling ``maxval`` according to ``level``.
"""
@@ -88,8 +89,10 @@ def _sample_level(n: float):


def _symmetric_sample(level: float):
"""Helper function to sample from a distribution over the domain [0.1, 10] with median == 1 and uniform probability
of x | 0.1 ≤ x ≤ 1, and x | 1 ≤ x ≤ 10.
"""Helper function to sample from a symmetric distribution.
The distribution over the domain [0.1, 10] with median == 1 and uniform probability of x | 0.1 ≤ x ≤ 1,
and x | 1 ≤ x ≤ 10.
Used for sampling transforms that can range from intensity 0 to infinity, and for which an intensity of 1 == no
change.
@@ -106,7 +109,8 @@ def autocontrast(pil_img: Image.Image, level: float = 0.0):
.. seealso:: :func:`PIL.ImageOps.autocontrast`.
Args:
pil_img (Image.Image): The image
pil_img (Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.autocontrast(pil_img)
@@ -118,7 +122,8 @@ def equalize(pil_img: Image.Image, level: float):
.. seealso:: :func:`PIL.ImageOps.equalize`.
Args:
pil_img (Image.Image): The image
pil_img (Image.Image): The image.
level (float): The intensity.
"""
del level # unused
return ImageOps.equalize(pil_img)
2 changes: 2 additions & 0 deletions composer/callbacks/callback_hparams_registry.py
@@ -1,6 +1,8 @@
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0

"""Hyperparameter registry for callbacks."""

from typing import Dict, Type, Union

import yahp as hp
10 changes: 3 additions & 7 deletions composer/callbacks/checkpoint_saver.py
@@ -27,11 +27,11 @@


def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
r"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, :class:`.Time`]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`.TimeUnit.EPOCH`\\s.
saved. If an integer, it will be assumed to be in :attr:`.TimeUnit.EPOCH`\s.
Otherwise, the unit must be either :attr:`.TimeUnit.EPOCH` or :attr:`.TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
@@ -85,7 +85,7 @@ def save_interval(state: State, event: Event):
return save_interval


class CheckpointSaver(Callback):
class CheckpointSaver(Callback): # noqa: D101
__doc__ = f"""Callback to save checkpoints.
.. note::
@@ -114,10 +114,6 @@ class CheckpointSaver(Callback):
... )
... ])
.. testcleanup::
trainer.engine.close()
Args:
folder (str, optional): Format string for the folder where checkpoints will be saved.
Default: ``'{{run_name}}/checkpoints'``.
6 changes: 2 additions & 4 deletions composer/callbacks/early_stopper.py
@@ -21,11 +21,9 @@


class EarlyStopper(Callback):
"""This callback tracks a training or evaluation metric and halts training if the metric does not
improve within a given interval.
Example
"""Halt training if a metric does not improve within a given interval.
Example:
.. doctest::
>>> from composer.callbacks.early_stopper import EarlyStopper
8 changes: 1 addition & 7 deletions composer/callbacks/grad_monitor.py
@@ -17,8 +17,7 @@ class GradMonitor(Callback):
the model and hence may cause a reduction in throughput while training large models. In order to ensure the
correctness of norm, this function should be called after gradient unscaling in cases where gradients are scaled.
Example
Example:
.. doctest::
>>> from composer.callbacks import GradMonitor
@@ -32,10 +31,6 @@
... callbacks=[GradMonitor()],
... )
.. testcleanup::
trainer.engine.close()
The L2 norms are logged by the :class:`~composer.loggers.logger.Logger` to the following keys as described below.
+-----------------------------------+-------------------------------------------------------------+
@@ -57,7 +52,6 @@
"""

def __init__(self, log_layer_grad_norms: bool = False):
super().__init__()
self.log_layer_grad_norms = log_layer_grad_norms

def after_train_batch(self, state: State, logger: Logger):
9 changes: 2 additions & 7 deletions composer/callbacks/lr_monitor.py
@@ -14,8 +14,7 @@ class LRMonitor(Callback):
This callback iterates over all optimizers and their parameter groups to log learning rate under the
``lr-{OPTIMIZER_NAME}/group{GROUP_NUMBER}`` key.
Example
Example:
.. doctest::
>>> from composer.callbacks import LRMonitor
@@ -29,10 +28,6 @@
... callbacks=[LRMonitor()],
... )
.. testcleanup::
trainer.engine.close()
The learning rate is logged by the :class:`~composer.loggers.logger.Logger` to the following key as described
below.
@@ -46,7 +41,7 @@
"""

def __init__(self) -> None:
super().__init__()
pass

def batch_end(self, state: State, logger: Logger):
assert state.optimizers is not None, "optimizers must be defined"
7 changes: 1 addition & 6 deletions composer/callbacks/memory_monitor.py
@@ -23,8 +23,7 @@ class MemoryMonitor(Callback):
This callback calls the torch memory stats API for cuda (see :func:`torch.cuda.memory_stats`) on the
:attr:`~composer.core.event.Event.AFTER_TRAIN_BATCH` and reports different memory statistics.
Example
Example:
.. doctest::
>>> from composer.callbacks import MemoryMonitor
@@ -38,10 +37,6 @@
... callbacks=[MemoryMonitor()],
... )
.. testcleanup::
trainer.engine.close()
The memory statistics are logged by the :class:`~composer.loggers.logger.Logger` to the following keys as
described below.