Commit 637460f: Merge branch 'main' into encoder

Authored by xytintel on Nov 11, 2024
2 parents: 199b4ce + fa35a4d
Showing 2 changed files with 4 additions and 9 deletions.
CMakeLists.txt (3 additions, 0 deletions)

@@ -22,6 +22,9 @@ cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
 
 set(PROJECT_NAME "torch-xpu-ops")
 set(PROJECT_VERSION "2.3.0")
+# Avoid SYCL compiler error
+string(APPEND CMAKE_CXX_FLAGS " -Wno-error=comment")
+
 cmake_policy(SET CMP0048 NEW)
 project(${PROJECT_NAME} VERSION "${PROJECT_VERSION}" LANGUAGES C CXX)
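For context on the added flag: -Wno-error=comment keeps the compiler's -Wcomment diagnostic from being promoted to a hard error under -Werror. A minimal C++ illustration of the kind of code that trips this warning; this snippet is an assumption for illustration only and is not taken from this repository:

    // The second "/*" inside the block comment below triggers the
    // -Wcomment diagnostic in GCC and Clang-based compilers (including
    // Intel's SYCL toolchain); under -Werror it would abort the build.
    /* outer comment /* stray comment opener */
    int main() { return 0; }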
test/xpu/skip_list_common.py (1 addition, 9 deletions)

@@ -2688,16 +2688,8 @@
     "nn/test_pruning_xpu.py": None,
 
     "test_foreach_xpu.py": (
-        # CPU fallback fails. Implementation difference between CPU and CUDA. Expect success on CPU and expect fail on CUDA. When we use CPU fallback and align expected fail list with CUDA, these cases fail.
-        "test_binary_op_with_scalar_self_support__foreach_pow_is_fastpath_True_xpu_bool",
-        # AssertionError: RuntimeError not raised
-        # https://github.com/intel/torch-xpu-ops/issues/784
-        "test_0dim_tensor_overload_exception_xpu",
         # RuntimeError: Tried to instantiate dummy base class CUDAGraph
-        "test_big_num_tensors__foreach_max_use_cuda_graph_True_xpu_float32",
-        "test_big_num_tensors__foreach_max_use_cuda_graph_True_xpu_float64",
-        "test_big_num_tensors__foreach_norm_use_cuda_graph_True_xpu_float32",
-        "test_big_num_tensors__foreach_norm_use_cuda_graph_True_xpu_float64",
+        "use_cuda_graph_True",
     ),
 
     "nn/test_convolution_xpu.py": (
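Each of the four removed test_big_num_tensors cases contains the substring use_cuda_graph_True, so the single new entry covers them all, plus any future CUDA-graph variants. This presumes the skip list is applied by substring matching against test names; a minimal Python sketch of that assumed matching logic, using hypothetical names not taken from the repository's test harness:

    # Hypothetical sketch: applying a skip list by substring matching.
    # should_skip and skip_patterns are illustrative names only.
    def should_skip(test_name, patterns):
        return any(pattern in test_name for pattern in patterns)

    skip_patterns = ("use_cuda_graph_True",)
    assert should_skip(
        "test_big_num_tensors__foreach_max_use_cuda_graph_True_xpu_float32",
        skip_patterns,
    )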
