
Commit

Revert "Added new test sample to interpolate op in OpInfo (pytorch#10…
Browse files Browse the repository at this point in the history
…4181)"

This reverts commit 87f8bc6.

Reverted pytorch#104181 on behalf of https://github.com/peterbell10 due to Causing OOM in slow-gradcheck ([comment](pytorch#104181 (comment)))
pytorchmergebot committed Oct 3, 2023
1 parent 40be6b7 commit df3ab70
Showing 4 changed files with 18 additions and 82 deletions.
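For context, a rough back-of-envelope estimate of why the reverted image-sized sample is hostile to slow gradcheck. This is a sketch only: it assumes slow-mode gradcheck materializes a dense (in_numel, out_numel) float64 Jacobian and that the sample uses N=2, C=3 batch/channel sizes; the exact CI failure may differ.

# Illustrative estimate only; see assumptions above.
in_numel = 2 * 3 * 400 * 400        # (N, C, 400, 400) input from the removed sample
out_numel = 2 * 3 * 270 * 270       # (N, C, 270, 270) requested output size
jacobian_bytes = in_numel * out_numel * 8   # one dense float64 Jacobian
print(f"{jacobian_bytes / 2**40:.1f} TiB")  # ~3.1 TiB, far beyond CI memory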
1 change: 0 additions & 1 deletion test/functorch/test_aotdispatch.py
@@ -2916,7 +2916,6 @@ def forward(self, x):
decorate('svd_lowrank', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
decorate('linalg.householder_product', decorator=unittest.skipIf(IS_MACOS and IS_X86, 'flaky')),
decorate('linalg.pinv', 'singular', decorator=toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)})),
decorate('nn.functional.interpolate', 'bicubic', decorator=toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-05)})),
# conv2d sometimes nondeterministic in this config?
decorate('nn.functional.conv2d', decorator=unittest.skipIf(IS_ARM64, "flaky")),
}
7 changes: 0 additions & 7 deletions test/inductor/test_torchinductor_opinfo.py
@@ -350,13 +350,6 @@ def wrapper_noop_set_seed(op, *args, **kwargs):
("special.log_ndtr", "cuda", f64): {"atol": 1e-6, "rtol": 1e-5},
("std_mean.unbiased", "cuda", f16): {"reference_in_float": True},
("uniform", "cuda"): {"reference_in_float": True},
# Temporarily skip interpolate bilinear and bicubic tests:
"nn.functional.interpolate.bicubic": {
"assert_equal": False,
"check_gradient": False,
},
"nn.functional.interpolate.bilinear": {"assert_equal": False},
"nn.functional.upsample_bilinear": {"assert_equal": False},
}

# Always test with all sample for following ops
14 changes: 0 additions & 14 deletions test/test_mps.py
@@ -384,9 +384,6 @@ def mps_ops_modifier(ops):

# cpu not giving nan for x/0.0
'atan2': [torch.bool, torch.float16, torch.int16, torch.int32, torch.int64, torch.uint8, torch.int8],

# inconsistency errors between cpu and mps, max seen atol is 2
'nn.functional.interpolatebilinear': [torch.uint8],
}

MACOS_BEFORE_13_3_XFAILLIST = {
@@ -434,8 +431,6 @@ def mps_ops_modifier(ops):
MACOS_AFTER_13_1_XFAILLIST = {
# before macOS 13.2 it falls back to cpu and pass the forward pass
'grid_sampler_2d': [torch.float32], # Unsupported Border padding mode
# inconsistency errors between cpu and mps, max seen atol is 2
'nn.functional.interpolatebilinear': [torch.uint8],
}

MACOS_13_3_XFAILLIST = {
@@ -10992,12 +10987,6 @@ def get_samples():
elif op.name in ["pow", "__rpow__"]:
atol = 1e-6
rtol = 4e-6
elif op.name == "nn.functional.interpolate":
atol = 1e-3
rtol = 1e-4
elif op.name == "nn.functional.upsample_bilinear" and dtype == torch.uint8:
atol = 1.0
rtol = 0.0
else:
atol = None
rtol = None
@@ -11057,9 +11046,6 @@ def get_samples():
rtol = 1.5e-3
elif op.name == "unique" and cpu_kwargs["sorted"] is False:
continue
elif op.name == "nn.functional.interpolate":
atol = 1e-3
rtol = 1e-4
else:
atol = None
rtol = None
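The branches deleted above loosened the tolerances used when comparing MPS results against the CPU reference for interpolate. Conceptually (and only as a sketch, not the test's exact code; requires an MPS-capable Mac), they correspond to a comparison like:

import torch
import torch.nn.functional as F

x = torch.rand(2, 3, 8, 8)
cpu_out = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
mps_out = F.interpolate(x.to("mps"), scale_factor=2, mode='bilinear', align_corners=False)
# The removed elif branches supplied atol=1e-3 / rtol=1e-4 for this op.
torch.testing.assert_close(mps_out.cpu(), cpu_out, atol=1e-3, rtol=1e-4)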
78 changes: 18 additions & 60 deletions torch/testing/_internal/common_methods_invocations.py
@@ -4293,53 +4293,22 @@ def shape(size, rank, with_batch_channel=True):
return tuple([N, C] + ([size] * rank))
return tuple([size] * rank)

if mode in ('bilinear', 'bicubic'):
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
# we pick more realistic upper bound 256 instead of default 10 for uint8 dtype
high=256 if dtype == torch.uint8 else None,
)
# provide a single sample for more typical image processing usage
rank = 2
yield SampleInput(
make_arg(shape(400, rank)),
shape(270, rank, False),
scale_factor=None,
mode=mode,
align_corners=False,
)

make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)

for align_corners in align_corners_options:
for rank in ranks_for_mode[mode]:
yield SampleInput(
make_arg(shape(D, rank)),
shape(S, rank, False),
scale_factor=None,
mode=mode,
align_corners=align_corners,
)
yield SampleInput(
make_arg(shape(D, rank)),
shape(L, rank, False),
scale_factor=None,
mode=mode,
align_corners=align_corners,
)
yield SampleInput(make_arg(shape(D, rank)),
shape(S, rank, False), None, mode, align_corners)
yield SampleInput(make_arg(shape(D, rank)),
shape(L, rank, False), None, mode, align_corners)
for recompute_scale_factor in [False, True]:
for scale_factor in [1.7, 0.6]:
yield SampleInput(
make_arg(shape(D, rank)),
size=None,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
)
yield SampleInput(make_arg(shape(D, rank)),
None, 1.7, mode, align_corners,
recompute_scale_factor=recompute_scale_factor)
yield SampleInput(make_arg(shape(D, rank)),
None, 0.6, mode, align_corners,
recompute_scale_factor=recompute_scale_factor)

def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs):
N, C = 2, 3
@@ -4357,20 +4326,8 @@ def shape(size, rank, with_batch_channel=True):
return torch.Size([N, C] + ([size] * rank))
return torch.Size([size] * rank)

if mode in ('bilinear', ):
make_arg = partial(
make_tensor,
device=device,
dtype=dtype,
requires_grad=requires_grad,
# we pick more realistic upper bound 256 instead of default 10 for uint8 dtype
high=256 if dtype == torch.uint8 else None,
)
# provide a single sample for more typical image processing usage
rank = 2
yield SampleInput(make_arg(shape(400, rank)), size=shape(270, rank, False))

make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
make_arg = partial(make_tensor, device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)

for rank in ranks_for_mode[mode]:
yield SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False))
Expand All @@ -4379,15 +4336,16 @@ def shape(size, rank, with_batch_channel=True):
yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6)


def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs):
def sample_inputs_upsample_aten(mode, self, device, dtype, requires_grad, **kwargs):
N = 6
C = 3
H = 10
W = 20
S = 3
L = 5

input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad)
input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)

yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None)
yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None)
@@ -13017,7 +12975,7 @@ def reference_flatten(input, start_dim=0, end_dim=-1):
dtypes=floating_types_and(torch.uint8),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'),
sample_inputs_func=partial(sample_inputs_upsample_aten, 'bilinear'),
supports_out=False,
skips=(
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
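For reference, the "realistic image processing" sample removed from sample_inputs_interpolate and sample_inputs_upsample corresponds roughly to a call like the one below (a sketch; the real samples are built through make_tensor/SampleInput as in the deleted lines, and batch/channel sizes here are illustrative):

import torch
import torch.nn.functional as F

# Image-sized (N, C, 400, 400) input resized to (270, 270), as in the removed sample.
x = torch.rand(2, 3, 400, 400, dtype=torch.float64, requires_grad=True)
out = F.interpolate(x, size=(270, 270), mode='bicubic', align_corners=False)
out.sum().backward()  # backward through this large sample is what slow gradcheck stresses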
