diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index 95eb9199ef3..46af416e407 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -283,7 +283,7 @@ def __repr__(self) -> str:
 class Resize(torch.nn.Module):
     """Resize the input image to the given size.
     If the image is torch Tensor, it is expected
-    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
+    to have [..., H, W] shape, where ... means a maximum of two leading dimensions
 
     .. warning::
         The output image might be different depending on its type: when downsampling, the interpolation of PIL images
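
For reference, a minimal usage sketch (not part of the patch) illustrating the tensor shapes the amended docstring describes: `Resize` resizes the last two dimensions (H, W) of a tensor input. The 128x128 target size and the random tensors are arbitrary illustration values.

```python
import torch
from torchvision import transforms

# Resize operates on the trailing (H, W) dimensions of the input tensor.
resize = transforms.Resize((128, 128))

img_chw = torch.rand(3, 256, 256)        # [C, H, W]: one leading dimension
out_chw = resize(img_chw)                # -> shape [3, 128, 128]

batch_nchw = torch.rand(8, 3, 256, 256)  # [N, C, H, W]: two leading dimensions
out_nchw = resize(batch_nchw)            # -> shape [8, 3, 128, 128]

print(out_chw.shape, out_nchw.shape)
```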