From 0d702d3b04c53d0e8be9bb8ae7ed59dc29ab797a Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Mon, 3 Jul 2023 10:58:22 -0700
Subject: [PATCH] [fbsync] remove obsolete transforms tests (#7678)

Reviewed By: vmoens

Differential Revision: D47186581

fbshipit-source-id: b7a147e32fa281fb29aef68319e25cb7491855b7
---
 test/test_functional_tensor.py | 34 ----------------------------------
 1 file changed, 34 deletions(-)

diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 0e1cc648a19..de9d10d6bde 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -609,21 +609,6 @@ def test_resize_antialias(device, dt, size, interpolation):
     assert_equal(resized_tensor, resize_result)
 
 
-@needs_cuda
-@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
-def test_assert_resize_antialias(interpolation):
-
-    # Checks implementation on very large scales
-    # and catch TORCH_CHECK inside PyTorch implementation
-    torch.manual_seed(12)
-    tensor, _ = _create_data(1000, 1000, device="cuda")
-
-    # Error message is not yet updated in pytorch nightly
-    # with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
-    with pytest.raises(RuntimeError, match=r"Too much shared memory required"):
-        F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)
-
-
 def test_resize_antialias_default_warning():
 
     img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
@@ -641,25 +626,6 @@ def test_resize_antialias_default_warning():
         F.resized_crop(img, 0, 0, 10, 10, size=(20, 20), interpolation=NEAREST)
 
 
-@pytest.mark.parametrize("device", cpu_and_gpu())
-@pytest.mark.parametrize("dt", [torch.float32, torch.float64, torch.float16])
-@pytest.mark.parametrize("size", [[10, 7], [10, 42], [42, 7]])
-@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
-def test_interpolate_antialias_backward(device, dt, size, interpolation):
-
-    if dt == torch.float16 and device == "cpu":
-        # skip float16 on CPU case
-        return
-
-    torch.manual_seed(12)
-    x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
-    resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
-    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
-
-    x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
-    assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)
-
-
 def check_functional_vs_PIL_vs_scripted(
     fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max"
 ):