Skip to content

Commit

Permalink
Enable instrumentation/test_gpuhello.py for XPU (#1676)
Browse files Browse the repository at this point in the history
Closes #1297

Signed-off-by: Anatoly Myachev <[email protected]>
  • Loading branch information
anmyachev authored Jul 22, 2024
1 parent 0f48a0d commit ac9126d
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 7 deletions.
9 changes: 9 additions & 0 deletions .github/workflows/build-test-reusable.yml
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,12 @@ jobs:
run: |
source ./scripts/pytest-utils.sh
ensure_spirv_dis
SHARED_LIB_DIR="${GITHUB_WORKSPACE}/python/build/$(ls python/build | grep -i lib)/triton/_C"
if [ ! -d "${SHARED_LIB_DIR}" ]; then
echo "Could not find '${SHARED_LIB_DIR}'" ; exit -1
fi
cd python/test/unit
TRITON_TEST_SUITE=language \
pytest -vvv -n 8 --device xpu language/ --ignore=language/test_line_info.py --ignore=language/test_subprocess.py
Expand All @@ -184,6 +190,9 @@ jobs:
TRITON_DISABLE_LINE_INFO=0 TRITON_TEST_SUITE=line_info \
pytest -vvv --device xpu language/test_line_info.py
TRITON_ALWAYS_COMPILE=1 TRITON_DISABLE_LINE_INFO=0 LLVM_PASS_PLUGIN_PATH=${SHARED_LIB_DIR}/libGPUHello.so \
pytest -vvv --device xpu instrumentation/test_gpuhello.py
- name: Clear cache
run: |
rm -rf ~/.triton
Expand Down
14 changes: 7 additions & 7 deletions python/test/unit/instrumentation/test_gpuhello.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
import torch
import intel_extension_for_pytorch # type: ignore # noqa: F401

import pytest
import os

import triton
import triton.language as tl

test_stdout = 'Hello From First Instruction of GPU Kernel: kernel1\ttest_gpuhello.py:17:4\n\
Hello From First Instruction of GPU Kernel: kernel2\ttest_gpuhello.py:23:4\n\
Hello From First Instruction of GPU Kernel: kernel3\ttest_gpuhello.py:29:4\n'
test_stdout = 'Hello From First Instruction of GPU Kernel: kernel1\ttest_gpuhello.py:18:4\n\
Hello From First Instruction of GPU Kernel: kernel2\ttest_gpuhello.py:24:4\n\
Hello From First Instruction of GPU Kernel: kernel3\ttest_gpuhello.py:30:4\n'


@pytest.mark.parametrize(None, [None])
Expand All @@ -31,18 +32,17 @@ def kernel3(BLOCK_SIZE: tl.constexpr):

def func(x: torch.Tensor, y: torch.Tensor):
output = torch.empty_like(x)
assert x.is_cuda and y.is_cuda and output.is_cuda
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
kernel1[grid](BLOCK_SIZE=1024)
kernel2[grid](BLOCK_SIZE=1024)
kernel3[grid](BLOCK_SIZE=1024)


def test_op(capfd):
def test_op(capfd, device: str):
size = 98432
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
func(x, y)
stdout, stderr = capfd.readouterr()
if 'LLVM_PASS_PLUGIN_PATH' in os.environ:
Expand Down

0 comments on commit ac9126d

Please sign in to comment.