From df3ab70dde65397017c6d0280495bc8df3d82933 Mon Sep 17 00:00:00 2001
From: PyTorch MergeBot
Date: Tue, 3 Oct 2023 18:07:02 +0000
Subject: [PATCH] Revert "Added new test sample to interpolate op in OpInfo (#104181)"

This reverts commit 87f8bc65f8cbc3202d645cdfa80a206b564276ac.

Reverted https://github.com/pytorch/pytorch/pull/104181 on behalf of https://github.com/peterbell10 due to Causing OOM in slow-gradcheck ([comment](https://github.com/pytorch/pytorch/pull/104181#issuecomment-1745472323))
---
 test/functorch/test_aotdispatch.py          |  1 -
 test/inductor/test_torchinductor_opinfo.py  |  7 --
 test/test_mps.py                            | 14 ----
 .../_internal/common_methods_invocations.py | 78 +++++--------------
 4 files changed, 18 insertions(+), 82 deletions(-)

diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index da32833d11849d..da92db710b934c 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -2916,7 +2916,6 @@ def forward(self, x):
     decorate('svd_lowrank', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
     decorate('linalg.householder_product', decorator=unittest.skipIf(IS_MACOS and IS_X86, 'flaky')),
     decorate('linalg.pinv', 'singular', decorator=toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)})),
-    decorate('nn.functional.interpolate', 'bicubic', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
     # conv2d sometimes nondeterministic in this config?
     decorate('nn.functional.conv2d', decorator=unittest.skipIf(IS_ARM64, "flaky")),
 }
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index fa45a4a8618e35..cade0bc1c58c8c 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -350,13 +350,6 @@ def wrapper_noop_set_seed(op, *args, **kwargs):
     ("special.log_ndtr", "cuda", f64): {"atol": 1e-6, "rtol": 1e-5},
     ("std_mean.unbiased", "cuda", f16): {"reference_in_float": True},
     ("uniform", "cuda"): {"reference_in_float": True},
-    # Temporarily skip interpolate bilinear and bicubic tests:
-    "nn.functional.interpolate.bicubic": {
-        "assert_equal": False,
-        "check_gradient": False,
-    },
-    "nn.functional.interpolate.bilinear": {"assert_equal": False},
-    "nn.functional.upsample_bilinear": {"assert_equal": False},
 }
 
 # Always test with all sample for following ops
diff --git a/test/test_mps.py b/test/test_mps.py
index 776280ce55e5fb..abbb2d15700454 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -384,9 +384,6 @@ def mps_ops_modifier(ops):
 
         # cpu not giving nan for x/0.0
         'atan2': [torch.bool, torch.float16, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],
-
-        # inconsistency errors between cpu and mps, max seen atol is 2
-        'nn.functional.interpolatebilinear': [torch.uint8],
     }
 
     MACOS_BEFORE_13_3_XFAILLIST = {
@@ -434,8 +431,6 @@ def mps_ops_modifier(ops):
     MACOS_AFTER_13_1_XFAILLIST = {
         # before macOS 13.2 it falls back to cpu and pass the forward pass
         'grid_sampler_2d': [torch.float32],  # Unsupported Border padding mode
-        # inconsistency errors between cpu and mps, max seen atol is 2
-        'nn.functional.interpolatebilinear': [torch.uint8],
     }
 
     MACOS_13_3_XFAILLIST = {
@@ -10992,12 +10987,6 @@ def get_samples():
             elif op.name in ["pow", "__rpow__"]:
                 atol = 1e-6
                 rtol = 4e-6
-            elif op.name == "nn.functional.interpolate":
-                atol = 1e-3
-                rtol = 1e-4
-            elif op.name == "nn.functional.upsample_bilinear" and dtype == torch.uint8:
-                atol = 1.0
-                rtol = 0.0
             else:
                 atol = None
                 rtol = None
@@ -11057,9 +11046,6 @@ def get_samples():
                 rtol = 1.5e-3
             elif op.name == "unique" and cpu_kwargs["sorted"] is False:
                 continue
-            elif op.name == "nn.functional.interpolate":
-                atol = 1e-3
-                rtol = 1e-4
             else:
                 atol = None
                 rtol = None
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 8e3fe8e76e6648..a5e06ba4ec7347 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -4293,53 +4293,22 @@ def shape(size, rank, with_batch_channel=True):
             return tuple([N, C] + ([size] * rank))
         return tuple([size] * rank)
 
-    if mode in ('bilinear', 'bicubic'):
-        make_arg = partial(
-            make_tensor,
-            device=device,
-            dtype=dtype,
-            requires_grad=requires_grad,
-            # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype
-            high=256 if dtype == torch.uint8 else None,
-        )
-        # provide a single sample for more typical image processing usage
-        rank = 2
-        yield SampleInput(
-            make_arg(shape(400, rank)),
-            shape(270, rank, False),
-            scale_factor=None,
-            mode=mode,
-            align_corners=False,
-        )
-
-    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    make_arg = partial(make_tensor, device=device, dtype=dtype,
+                       requires_grad=requires_grad, low=-1, high=1)
 
     for align_corners in align_corners_options:
         for rank in ranks_for_mode[mode]:
-            yield SampleInput(
-                make_arg(shape(D, rank)),
-                shape(S, rank, False),
-                scale_factor=None,
-                mode=mode,
-                align_corners=align_corners,
-            )
-            yield SampleInput(
-                make_arg(shape(D, rank)),
-                shape(L, rank, False),
-                scale_factor=None,
-                mode=mode,
-                align_corners=align_corners,
-            )
+            yield SampleInput(make_arg(shape(D, rank)),
+                              shape(S, rank, False), None, mode, align_corners)
+            yield SampleInput(make_arg(shape(D, rank)),
+                              shape(L, rank, False), None, mode, align_corners)
             for recompute_scale_factor in [False, True]:
-                for scale_factor in [1.7, 0.6]:
-                    yield SampleInput(
-                        make_arg(shape(D, rank)),
-                        size=None,
-                        scale_factor=scale_factor,
-                        mode=mode,
-                        align_corners=align_corners,
-                        recompute_scale_factor=recompute_scale_factor,
-                    )
+                yield SampleInput(make_arg(shape(D, rank)),
+                                  None, 1.7, mode, align_corners,
+                                  recompute_scale_factor=recompute_scale_factor)
+                yield SampleInput(make_arg(shape(D, rank)),
+                                  None, 0.6, mode, align_corners,
+                                  recompute_scale_factor=recompute_scale_factor)
 
 def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):
     N, C = 2, 3
@@ -4357,20 +4326,8 @@ def shape(size, rank, with_batch_channel=True):
             return torch.Size([N, C] + ([size] * rank))
         return torch.Size([size] * rank)
 
-    if mode in ('bilinear', ):
-        make_arg = partial(
-            make_tensor,
-            device=device,
-            dtype=dtype,
-            requires_grad=requires_grad,
-            # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype
-            high=256 if dtype == torch.uint8 else None,
-        )
-        # provide a single sample for more typical image processing usage
-        rank = 2
-        yield SampleInput(make_arg(shape(400, rank)), size=shape(270, rank, False))
-
-    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+    make_arg = partial(make_tensor, device=device, dtype=dtype,
+                       requires_grad=requires_grad, low=-1, high=1)
 
     for rank in ranks_for_mode[mode]:
         yield SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False))
@@ -4379,7 +4336,7 @@ def shape(size, rank, with_batch_channel=True):
         yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6)
 
 
-def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs):
+def sample_inputs_upsample_aten(mode, self, device, dtype, requires_grad, **kwargs):
     N = 6
     C = 3
     H = 10
@@ -4387,7 +4344,8 @@ def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs
     S = 3
     L = 5
 
-    input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad)
+    input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype,
+                               requires_grad=requires_grad, low=-1, high=1)
 
     yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None)
     yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None)
@@ -13017,7 +12975,7 @@ def reference_flatten(input, start_dim=0, end_dim=-1):
            dtypes=floating_types_and(torch.uint8),
            dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
            gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
-           sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'),
+           sample_inputs_func=partial(sample_inputs_upsample_aten, 'bilinear'),
            supports_out=False,
            skips=(
                DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),