Fix num inputs
xuzhao9 committed Nov 9, 2024
1 parent 7c84b23 commit 0c98e95
Showing 2 changed files with 5 additions and 3 deletions.
6 changes: 4 additions & 2 deletions test/test_gpu/skip_tests_h100_pytorch.yaml
@@ -1,9 +1,11 @@
 # Tests we skip in OSS CI
 # This file is regarding to the Triton version bundled with pytorch
-# Use <op-name> to skip an entire operator
-# Use <op-name/impl-name> to skip an impl
+# Use <op-name:> to skip an entire operator
+# Use <op-name:\n - impl-name> to skip an impl
+bf16xint16_gemm:
+  - bf16xint16
 flash_attention:
   - triton_tutorial_flash_v2_tma
 fp8_attention:
   - colfax_fmha
 fp8_fused_quant_gemm_rowwise:
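For context, a minimal sketch (not the actual tritonbench harness code) of how a skip file in this new format could be loaded and queried: a bare "<op-name>:" entry parses to None and skips the whole operator, while an entry with a list underneath skips only the listed impls. The function names and the usage path below are hypothetical, assumed only for illustration.

import yaml

def load_skips(path):
    # Parse the skip file; an empty file yields an empty dict.
    with open(path) as f:
        return yaml.safe_load(f) or {}

def should_skip(skips, op, impl=None):
    # Operator not listed at all: nothing is skipped.
    if op not in skips:
        return False
    impls = skips[op]
    # A bare "op-name:" entry parses to None and skips the entire operator.
    if impls is None:
        return True
    # Otherwise only the impls listed under the operator are skipped.
    return impl in impls

# Example usage against the entry added in this commit:
# skips = load_skips("test/test_gpu/skip_tests_h100_pytorch.yaml")
# should_skip(skips, "bf16xint16_gemm", "bf16xint16")      -> True
# should_skip(skips, "bf16xint16_gemm", "some_other_impl") -> False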
2 changes: 1 addition & 1 deletion tritonbench/operators/ragged_attention/operator.py
@@ -29,7 +29,7 @@ def __init__(
         self.max_seq_len = 2**args.max_seq_len_log2
         self.num_buckets = args.num_buckets
         # set a default number of inputs
-        self._num_inputs = 10
+        self._num_inputs = 10 if self._num_inputs is None else self._num_inputs

     @register_benchmark()
     def hstu_triton_ragged_attention(self, qkv, seq_offsets, timestamps):
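The one-line change stops the operator from unconditionally overwriting the input count: the default of 10 is now applied only when no value was set earlier (for example by a command-line flag handled before this point). A minimal, self-contained sketch of the pattern, using an illustrative class rather than the real BenchmarkOperator API:

class Operator:
    def __init__(self, num_inputs=None):
        # Value that may already have been supplied upstream (e.g. via a flag).
        self._num_inputs = num_inputs
        # Before the fix: "self._num_inputs = 10" clobbered any supplied value.
        # After the fix: fall back to 10 only when nothing was provided.
        self._num_inputs = 10 if self._num_inputs is None else self._num_inputs

print(Operator()._num_inputs)              # 10 (default)
print(Operator(num_inputs=3)._num_inputs)  # 3 (user value preserved)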
