Skip to content

Commit

Permalink
Merge branch 'affine-box-helper' of github.com:pmeier/vision into affine-box-helper
Browse files Browse the repository at this point in the history
  • Loading branch information
pmeier committed Aug 28, 2023
2 parents 99bbbb2 + 5ea7241 commit f497a9e
Show file tree
Hide file tree
Showing 19 changed files with 168 additions and 275 deletions.
33 changes: 32 additions & 1 deletion docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,14 +76,45 @@
"""


class CustomGalleryExampleSortKey:
    """Callable sort key that orders gallery examples within a subsection.

    Examples under ``gallery/transforms`` follow an explicit, hand-curated
    order; every other subsection falls back to plain alphabetical order.

    See https://sphinx-gallery.github.io/stable/configuration.html#sorting-gallery-examples
    and https://github.com/sphinx-gallery/sphinx-gallery/blob/master/sphinx_gallery/sorting.py
    """

    # Curated display order for the gallery/transforms subsection.
    transforms_subsection_order = [
        "plot_transforms_getting_started.py",
        "plot_transforms_illustrations.py",
        "plot_transforms_e2e.py",
        "plot_cutmix_mixup.py",
        "plot_custom_transforms.py",
        "plot_datapoints.py",
        "plot_custom_datapoints.py",
    ]

    def __init__(self, src_dir):
        # Directory of the example currently being sorted; sphinx-gallery
        # instantiates one sort key per subsection.
        self.src_dir = src_dir

    def __call__(self, filename):
        if "gallery/transforms" not in self.src_dir:
            # For other subsections we just sort alphabetically by filename.
            return filename
        try:
            return self.transforms_subsection_order.index(filename)
        except ValueError as e:
            # A new example was added without registering its position above.
            raise ValueError(
                "Looks like you added an example in gallery/transforms? "
                "You need to specify its order in docs/source/conf.py. Look for CustomGalleryExampleSortKey."
            ) from e


# Sphinx-Gallery configuration.
# NOTE(review): the scraped diff left both the old and the new
# "subsection_order" entries in place, producing a duplicate dict key whose
# first (stale v2_transforms) value was dead; only the current paths are kept.
sphinx_gallery_conf = {
    "examples_dirs": "../../gallery/",  # path to your example scripts
    "gallery_dirs": "auto_examples",  # path to where to save gallery generated output
    "subsection_order": ExplicitOrder(["../../gallery/transforms", "../../gallery/others"]),
    "backreferences_dir": "gen_modules/backreferences",
    "doc_module": ("torchvision",),
    "remove_config_comments": True,
    "ignore_pattern": "helpers.py",
    "within_subsection_order": CustomGalleryExampleSortKey,
}

napoleon_use_ivar = True
Expand Down
2 changes: 1 addition & 1 deletion docs/source/datapoints.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ Datapoints
Datapoints are tensor subclasses which the :mod:`~torchvision.transforms.v2` v2 transforms use under the hood to
dispatch their inputs to the appropriate lower-level kernels. Most users do not
need to manipulate datapoints directly and can simply rely on dataset wrapping -
see e.g. :ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2_e2e.py`.
see e.g. :ref:`sphx_glr_auto_examples_transforms_plot_transforms_e2e.py`.

.. autosummary::
:toctree: generated/
Expand Down
4 changes: 2 additions & 2 deletions docs/source/transforms.rst
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ do to is to update the import to ``torchvision.transforms.v2``. In terms of
output, there might be negligible differences due to implementation differences.

To learn more about the v2 transforms, check out
:ref:`sphx_glr_auto_examples_v2_transforms_plot_transforms_v2.py`.
:ref:`sphx_glr_auto_examples_transforms_plot_transforms_getting_started.py`.

.. TODO: make sure link is still good!!
Expand Down Expand Up @@ -479,7 +479,7 @@ CutMix and MixUp are special transforms that
are meant to be used on batches rather than on individual images, because they
are combining pairs of images together. These can be used after the dataloader
(once the samples are batched), or part of a collation function. See
:ref:`sphx_glr_auto_examples_v2_transforms_plot_cutmix_mixup.py` for detailed usage examples.
:ref:`sphx_glr_auto_examples_transforms_plot_cutmix_mixup.py` for detailed usage examples.

.. autosummary::
:toctree: generated/
Expand Down
2 changes: 1 addition & 1 deletion gallery/others/plot_scripted_tensor_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def show(imgs):
# --------------------------
# Most transforms natively support tensors on top of PIL images (to visualize
# the effect of the transforms, you may refer to see
# :ref:`sphx_glr_auto_examples_others_plot_transforms.py`).
# :ref:`sphx_glr_auto_examples_transforms_plot_transforms_illustrations.py`).
# Using tensor images, we can run the transforms on GPUs if cuda is available!

import torch.nn as nn
Expand Down
4 changes: 4 additions & 0 deletions gallery/transforms/README.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
.. _transforms_gallery:

Transforms
----------
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from torchvision.transforms.v2 import functional as F


def plot(imgs):
def plot(imgs, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
Expand Down Expand Up @@ -40,7 +40,11 @@ def plot(imgs):
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)

ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy())
ax.imshow(img.permute(1, 2, 0).numpy(), **imshow_kwargs)
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])

if row_title is not None:
for row_idx in range(num_rows):
axs[row_idx, 0].set(ylabel=row_title[row_idx])

plt.tight_layout()
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,12 @@
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_datapoints.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_custom_datapoints.py>` to download the full example code.
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_datapoints.py>` to download the full example code.
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`.
:ref:`sphx_glr_auto_examples_transforms_plot_datapoints.py`.
"""

# %%
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_custom_transforms.py>` to download the full example code.
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_cutmix_mixup.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_cutmix_mixup.py>` to download the full example code.
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_cutmix_mixup.py>` to download the full example code.
:class:`~torchvision.transforms.v2.CutMix` and
:class:`~torchvision.transforms.v2.MixUp` are popular augmentation strategies
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_datapoints.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_datapoints.py>` to download the full example code.
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_datapoints.py>` to download the full example code.
Datapoints are Tensor subclasses introduced together with
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
===============================================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_transforms_v2_e2e.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_transforms_v2_e2e.py>` to download the full example code.
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_transforms_e2e.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_transforms_e2e.py>` to download the full example code.
Object detection and segmentation tasks are natively supported:
``torchvision.transforms.v2`` enables jointly transforming images, videos,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
==================================
.. note::
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_transforms_v2.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_transforms_v2.py>` to download the full example code.
Try on `collab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_transforms_getting_started.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_transforms_getting_started.py>` to download the full example code.
This example illustrates all of what you need to know to get started with the
new :mod:`torchvision.transforms.v2` API. We'll cover simple tasks like
Expand Down Expand Up @@ -70,7 +70,7 @@
# <transforms>` to learn more about recommended practices and conventions, or
# explore more :ref:`examples <transforms_gallery>` e.g. how to use augmentation
# transforms like :ref:`CutMix and MixUp
# <sphx_glr_auto_examples_v2_transforms_plot_cutmix_mixup.py>`.
# <sphx_glr_auto_examples_transforms_plot_cutmix_mixup.py>`.
#
# .. note::
#
Expand Down Expand Up @@ -148,7 +148,7 @@
#
# You don't need to know much more about datapoints at this point, but advanced
# users who want to learn more can refer to
# :ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`.
# :ref:`sphx_glr_auto_examples_transforms_plot_datapoints.py`.
#
# What do I pass as input?
# ------------------------
Expand Down Expand Up @@ -243,7 +243,7 @@
#
# from torchvision.datasets import CocoDetection, wrap_dataset_for_transforms_v2
#
# dataset = CocoDetection(..., transforms=my_v2_transforms)
# dataset = CocoDetection(..., transforms=my_transforms)
# dataset = wrap_dataset_for_transforms_v2(dataset)
# # Now the dataset returns datapoints!
#
Expand Down
Loading

0 comments on commit f497a9e

Please sign in to comment.