From e9f728cf381845b370b4175aa58d3b1a13525731 Mon Sep 17 00:00:00 2001
From: Xu Zhao
Date: Fri, 8 Nov 2024 20:00:17 -0500
Subject: [PATCH] Fix PR test

---
 .github/workflows/linter.yaml              |  2 +-
 .github/workflows/pr.yaml                  |  4 ++--
 pyproject.toml                             |  4 ++++
 test/test_gpu/skip_tests_h100_pytorch.yaml | 15 ++++++++++++++-
 4 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml
index 6fe9b747..f9f2eeb9 100644
--- a/.github/workflows/linter.yaml
+++ b/.github/workflows/linter.yaml
@@ -19,7 +19,7 @@ jobs:
           path: tritonbench
       - name: Install deps
         run: |
-          pip install ruff-api
+          pip install ruff-api==0.1.0
       - name: Check Formatting
         uses: omnilib/ufmt@action-v1
         with:
diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml
index 9ce8c168..717320b3 100644
--- a/.github/workflows/pr.yaml
+++ b/.github/workflows/pr.yaml
@@ -27,9 +27,9 @@ jobs:
           sudo nvidia-smi -pm 1
           sudo ldconfig
           nvidia-smi
-      - name: Test Tritonbench operators
+      - name: Test Tritonbench operators on H100 GPU
         run: |
-          bash ./.ci/tritonbench/test-operators.sh
+          bash ./.ci/tritonbench/test-gpu.sh
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
diff --git a/pyproject.toml b/pyproject.toml
index d13664e8..c8630b05 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,7 @@
 [tool.ufmt]
 formatter = "ruff-api"
 excludes = ["submodules/"]
+
+[tool.black]
+line-length = 88
+target-version = ["py312"]
diff --git a/test/test_gpu/skip_tests_h100_pytorch.yaml b/test/test_gpu/skip_tests_h100_pytorch.yaml
index 6119dc9d..d867a00d 100644
--- a/test/test_gpu/skip_tests_h100_pytorch.yaml
+++ b/test/test_gpu/skip_tests_h100_pytorch.yaml
@@ -2,10 +2,23 @@
 # This file is regarding to the Triton version bundled with pytorch
 # Use to skip an entire operator
 # Use to skip an impl
-- test_op
 - bf16xint16_gemm/bf16xint16
 - fp8_attention/colfax_fmha
 - fp8_fused_quant_gemm_rowwise
 - fp8_gemm/triton_persistent_fp8_gemm
 - fp8_gemm/triton_tma_persistent_fp8_gemm
 - fp8_gemm_rowwise
+- gemm
+- grouped_gemm
+- int4_gemm
+- jagged_layer_norm
+- jagged_mean
+- jagged_softmax
+- jagged_sum
+- layer_norm
+- low_mem_dropout
+- rms_norm
+- rope
+- swiglu
+- template_attention
+- test_op