diff --git a/test/test_transforms_v2_refactored.py b/test/test_transforms_v2_refactored.py
index f8a47c7cf39..052cc4291ad 100644
--- a/test/test_transforms_v2_refactored.py
+++ b/test/test_transforms_v2_refactored.py
@@ -2581,9 +2581,6 @@ def test_transform(self, param, value, make_input):
         #  2. the fill parameter only has an affect if we need padding
         kwargs["size"] = [s + 4 for s in self.INPUT_SIZE]
 
-        if isinstance(input, PIL.Image.Image) and isinstance(value, (tuple, list)) and len(value) == 1:
-            pytest.xfail("F._pad_image_pil does not support sequences of length 1 for fill.")
-
         if isinstance(input, tv_tensors.Mask) and isinstance(value, (tuple, list)):
             pytest.skip("F.pad_mask doesn't support non-scalar fill.")
 
diff --git a/test/transforms_v2_dispatcher_infos.py b/test/transforms_v2_dispatcher_infos.py
index 6d7ee64d21a..b84d87eb7ae 100644
--- a/test/transforms_v2_dispatcher_infos.py
+++ b/test/transforms_v2_dispatcher_infos.py
@@ -1,5 +1,3 @@
-import collections.abc
-
 import pytest
 import torchvision.transforms.v2.functional as F
 from torchvision import tv_tensors
@@ -112,32 +110,6 @@ def xfail_jit_python_scalar_arg(name, *, reason=None):
     multi_crop_skips.append(skip_dispatch_tv_tensor)
 
 
-def xfails_pil(reason, *, condition=None):
-    return [
-        TestMark(("TestDispatchers", test_name), pytest.mark.xfail(reason=reason), condition=condition)
-        for test_name in ["test_dispatch_pil", "test_pil_output_type"]
-    ]
-
-
-def fill_sequence_needs_broadcast(args_kwargs):
-    (image_loader, *_), kwargs = args_kwargs
-    try:
-        fill = kwargs["fill"]
-    except KeyError:
-        return False
-
-    if not isinstance(fill, collections.abc.Sequence) or len(fill) > 1:
-        return False
-
-    return image_loader.num_channels > 1
-
-
-xfails_pil_if_fill_sequence_needs_broadcast = xfails_pil(
-    "PIL kernel doesn't support sequences of length 1 for `fill` if the number of color channels is larger.",
-    condition=fill_sequence_needs_broadcast,
-)
-
-
 DISPATCHER_INFOS = [
     DispatcherInfo(
         F.resized_crop,
@@ -159,14 +131,6 @@ def fill_sequence_needs_broadcast(args_kwargs):
         },
         pil_kernel_info=PILKernelInfo(F._pad_image_pil, kernel_name="pad_image_pil"),
         test_marks=[
-            *xfails_pil(
-                reason=(
-                    "PIL kernel doesn't support sequences of length 1 for argument `fill` and "
-                    "`padding_mode='constant'`, if the number of color channels is larger."
-                ),
-                condition=lambda args_kwargs: fill_sequence_needs_broadcast(args_kwargs)
-                and args_kwargs.kwargs.get("padding_mode", "constant") == "constant",
-            ),
             xfail_jit("F.pad only supports vector fills for list of floats", condition=pad_xfail_jit_fill_condition),
             xfail_jit_python_scalar_arg("padding"),
         ],
@@ -181,7 +145,6 @@ def fill_sequence_needs_broadcast(args_kwargs):
         },
         pil_kernel_info=PILKernelInfo(F._perspective_image_pil),
         test_marks=[
-            *xfails_pil_if_fill_sequence_needs_broadcast,
             xfail_jit_python_scalar_arg("fill"),
         ],
     ),
diff --git a/torchvision/transforms/_functional_pil.py b/torchvision/transforms/_functional_pil.py
index 120998d0072..277848224ac 100644
--- a/torchvision/transforms/_functional_pil.py
+++ b/torchvision/transforms/_functional_pil.py
@@ -264,11 +264,13 @@ def _parse_fill(
     if isinstance(fill, (int, float)) and num_channels > 1:
         fill = tuple([fill] * num_channels)
     if isinstance(fill, (list, tuple)):
-        if len(fill) != num_channels:
+        if len(fill) == 1:
+            fill = fill * num_channels
+        elif len(fill) != num_channels:
             msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
             raise ValueError(msg.format(len(fill), num_channels))
 
-        fill = tuple(fill)
+        fill = tuple(fill)  # type: ignore[arg-type]
 
     if img.mode != "F":
         if isinstance(fill, (list, tuple)):
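
For reference, a minimal sketch (not part of the diff) of the behavior the `_parse_fill` change enables: a length-1 `fill` sequence is now broadcast across all color channels of a PIL image, so the xfails removed above are no longer needed. This assumes a torchvision build with this patch applied and exercises the public v2 `pad` functional, which routes PIL inputs through `F._pad_image_pil`.

```python
# Minimal sketch of the broadcast behavior added in _parse_fill above,
# assuming a torchvision build that includes this patch.
import PIL.Image
import torchvision.transforms.v2.functional as F

img = PIL.Image.new("RGB", (32, 32))

# A length-1 fill sequence is now broadcast over all three channels,
# i.e. fill=[127] behaves like fill=(127, 127, 127). Before this patch,
# _parse_fill raised ValueError because len(fill) != num_channels.
padded = F.pad(img, padding=4, fill=[127], padding_mode="constant")
assert padded.size == (40, 40)  # 4 px of constant padding on every side
```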