[UT] Improve pass rate (#1090)
- Unskip passing tests
- Mark expected failures as xfail

Pass rate: 98.4% -> 98.65%

Signed-off-by: Whitney Tsang <[email protected]>
whitneywhtsang authored May 11, 2024
1 parent 2758215 commit e47fd95
Showing 4 changed files with 14 additions and 15 deletions.
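
A note on the skip-to-xfail conversions in the diffs below: pytest.skip reports a test as skipped (not run in this environment), while pytest.xfail, called inside the test body, reports it as an expected failure, so known gaps stay visible in the results instead of being folded in with ordinary skips. The following minimal sketch only illustrates the two calls; the test names and the is_on_mi300 stub are hypothetical and not part of this commit.

import pytest


def is_on_mi300():
    # Hypothetical hardware probe, used only for illustration.
    return False


def test_upcast_with_skip():
    # skip: the test is reported as "skipped"; nothing below this call runs.
    if not is_on_mi300():
        pytest.skip("upcast only supported on AMDGPU MI300")


def test_upcast_with_xfail():
    # xfail: the test is reported as "xfailed" (an expected failure),
    # which distinguishes a known limitation from an environment mismatch.
    if not is_on_mi300():
        pytest.xfail("upcast only supported on AMDGPU MI300")
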
python/test/unit/language/test_conversions.py (2 changes: 1 addition & 1 deletion)
@@ -321,7 +321,7 @@ def test_typeconvert_upcast(src_dtype, dst_dtype, device):
         pytest.skip(f"{src_dtype} upcast tests not supported on ROCm")
 
     if src_dtype in ('float8e4b8', 'float8e5b16') and (is_cuda() or not is_on_mi300()):
-        pytest.skip("{src_dtype} upcast tests only supported on AMDGPU MI300")
+        pytest.xfail("{src_dtype} upcast tests only supported on AMDGPU MI300")
 
     # dtype : (exponent_bits, mantissa_bits, exponent_bias, max_repr)
     stuff = {
python/test/unit/language/test_core.py (13 changes: 8 additions & 5 deletions)
@@ -1385,10 +1385,13 @@ def test_atomic_rmw(op, dtype_x_str, mode, sem, device):
     check_type_supported(dtype_x_str, device)
     if is_interpreter():
         if dtype_x_str == 'float16':
-            pytest.skip("Only test atomic float16 ops on GPU")
-    if is_xpu():
-        if dtype_x_str == 'float16' and (mode != "min_neg" or sem != "acquire"):
-            pytest.skip("FIXME: Atomic RMW for float16 not yet supported by IGC")
+            pytest.xfail("Only test atomic float16 ops on GPU")
+
+    if torch.cuda.is_available():
+        capability = torch.cuda.get_device_capability()
+        if capability[0] < 7:
+            if dtype_x_str == 'float16':
+                pytest.skip("Only test atomic float16 ops on devices with sm >= 70")
 
     n_programs = 5
 
@@ -3400,7 +3403,7 @@ def kernel(
 def test_max_num_imprecise_acc(device):
 
     if not hasattr(torch, 'float8_e5m2'):
-        pytest.skip(f"torch {torch.__version__} does not support float8_e5m2")
+        pytest.xfail(f"torch {torch.__version__} does not support float8_e5m2")
 
     if is_cuda():
         capability = torch.cuda.get_device_capability()
python/test/unit/operators/test_flash_attention.py (5 changes: 2 additions & 3 deletions)
@@ -26,9 +26,8 @@ def test_op(Z, H, N_CTX, D_HEAD, dtype, causal, seq_par, device):
     if dtype == torch.bfloat16 and os.environ.get("TRITON_INTERPRET", "0") == "1":
         pytest.xfail("Flash attention bfloat16 not supported in interpreter mode")
 
-    if device == "xpu":
-        if D_HEAD > 32:
-            pytest.skip("FIXME: results precision issue")
+    if device == "xpu" and D_HEAD >= 64:
+        pytest.skip("FIXME: results precision issue")
 
     # Pytorch does not support Half data type for matmul operation hence the skip
     if device == 'cpu':
python/test/unit/runtime/test_bindings.py (9 changes: 3 additions & 6 deletions)
@@ -1,7 +1,6 @@
 import triton
 import triton.language as tl
 
-import pytest
 import torch
 
 
@@ -33,8 +32,6 @@ def test_module_walk():
     Test the MLIR bindings exposed for the out-ot-tree walk.
     """
 
-    pytest.skip("FIXME: Incorrect results on XPU")
-
     def walk_fn(op):
         name = op.get_name()
         for i in range(op.get_num_results()):
@@ -55,10 +52,10 @@ def walk_fn(op):
 
     kernel = add_kernel
     args = [
-        torch.empty((32, 32), device="cuda"),  # in_ptr0
-        torch.empty((32, 32), device="cuda"),  # in_ptr1
+        torch.empty((32, 32), device="xpu"),  # in_ptr0
+        torch.empty((32, 32), device="xpu"),  # in_ptr1
         1024,  # n_elements
-        torch.empty((32, 32), device="cuda"),  # out_ptr
+        torch.empty((32, 32), device="xpu"),  # out_ptr
         16,  # BLOCK_SIZE
     ]
     src = triton.compiler.compiler.ASTSource(
