[fbsync] remove obsolete transforms tests (#7678)
Reviewed By: vmoens

Differential Revision: D47186581

fbshipit-source-id: b7a147e32fa281fb29aef68319e25cb7491855b7
NicolasHug authored and facebook-github-bot committed Jul 3, 2023
1 parent b1b69d6 commit 0d702d3
Showing 1 changed file with 0 additions and 34 deletions.
34 changes: 0 additions & 34 deletions test/test_functional_tensor.py
@@ -609,21 +609,6 @@ def test_resize_antialias(device, dt, size, interpolation):
assert_equal(resized_tensor, resize_result)


@needs_cuda
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
def test_assert_resize_antialias(interpolation):

# Checks implementation on very large scales
# and catch TORCH_CHECK inside PyTorch implementation
torch.manual_seed(12)
tensor, _ = _create_data(1000, 1000, device="cuda")

# Error message is not yet updated in pytorch nightly
# with pytest.raises(RuntimeError, match=r"Provided interpolation parameters can not be handled"):
with pytest.raises(RuntimeError, match=r"Too much shared memory required"):
F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)


def test_resize_antialias_default_warning():

img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
@@ -641,25 +626,6 @@ def test_resize_antialias_default_warning():
F.resized_crop(img, 0, 0, 10, 10, size=(20, 20), interpolation=NEAREST)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize("size", [[10, 7], [10, 42], [42, 7]])
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
def test_interpolate_antialias_backward(device, dt, size, interpolation):

if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return

torch.manual_seed(12)
x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
resize = partial(F.resize, size=size, interpolation=interpolation, antialias=True)
assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)


def check_functional_vs_PIL_vs_scripted(
fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max"
):
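
Note: the gradient check performed by the deleted test_interpolate_antialias_backward can still be reproduced with public APIs only. Below is a minimal sketch that picks one point from the removed parametrized grid (CPU, float64, BILINEAR, size [10, 7]); those choices, and the use of a contiguous NCHW input, are illustrative assumptions rather than the full test coverage.

from functools import partial

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

# Double-precision NCHW input, as in the removed test (gradcheck recommends float64).
torch.manual_seed(12)
x = (torch.rand(1, 3, 32, 29, dtype=torch.double, requires_grad=True),)

# Wrap the antialiased resize so gradcheck can call it with a single tensor argument.
resize = partial(F.resize, size=[10, 7], interpolation=InterpolationMode.BILINEAR, antialias=True)

# Numerically compares the analytic backward of the antialiased kernel against finite differences.
assert torch.autograd.gradcheck(resize, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

fast_mode=False mirrors the removed test and keeps the full Jacobian comparison rather than the faster randomized variant.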

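The very-large-downscale case covered by the deleted test_assert_resize_antialias can likewise be exercised by hand. This is a hedged sketch, assuming a CUDA device and a plain uint8 image in place of the test suite's _create_data helper; whether the TORCH_CHECK inside the PyTorch kernel still trips for this configuration, and with which message, depends on the PyTorch version (the removed comments already note the message changing), so the outcome is printed rather than asserted.

import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

# Large random uint8 image on the GPU, standing in for _create_data(1000, 1000, device="cuda").
torch.manual_seed(12)
tensor = torch.randint(0, 256, size=(3, 1000, 1000), dtype=torch.uint8, device="cuda")

try:
    # Extreme downscale with antialiasing: the scale the removed test targeted.
    F.resize(tensor, size=(5, 5), interpolation=InterpolationMode.BICUBIC, antialias=True)
    print("resize succeeded; this scale no longer hits the shared-memory TORCH_CHECK")
except RuntimeError as err:
    print(f"resize raised RuntimeError, as the old test expected: {err}")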