diff --git a/test/Polar/api.jl b/test/Polar/api.jl
index 0c6d5691..c5e5dc28 100644
--- a/test/Polar/api.jl
+++ b/test/Polar/api.jl
@@ -135,8 +135,7 @@ function test_polar_api(polar, device, M)
 
     ## Cost Production
     cost_production = ExaPF.CostFunction(polar)
-    c2 = CUDA.@allowscalar cost_production(stack)[1]
-    @test isa(c2, Real)
+    @test isa(cost_production(stack), M)
     # Test display
     println(devnull, cost_production)
     println(devnull, basis)
diff --git a/test/Polar/second_order.jl b/test/Polar/second_order.jl
index 947d55f1..ede47d6e 100644
--- a/test/Polar/second_order.jl
+++ b/test/Polar/second_order.jl
@@ -121,7 +121,7 @@ function test_full_space_hessian(polar, device, MT)
     Hd = FiniteDiff.finite_difference_jacobian(grad_fd_x, x)
 
     # Test that both Hessian match
-    if isa(device, ROCBackend)
+    if startswith(string(device), "ROCBackend")
         @test_broken myisapprox(Hd, H, rtol=1e-5)
     else
         @test myisapprox(Hd, H, rtol=1e-5)
diff --git a/test/quickstart.jl b/test/quickstart.jl
index 001e8fb5..7a7c3da1 100644
--- a/test/quickstart.jl
+++ b/test/quickstart.jl
@@ -72,7 +72,7 @@ const LS = ExaPF.LinearSolvers
     @test convergence.n_iterations == 5
     @test convergence.norm_residuals <= pf_algo.tol
 
-    if CUDA.has_cuda_gpu()
+    if test_cuda
         polar_gpu = ExaPF.PolarForm(pf, CUDABackend())
         stack_gpu = ExaPF.NetworkStack(polar_gpu)
 
diff --git a/test/runtests.jl b/test/runtests.jl
index d1b9ca57..f064b841 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,8 +1,7 @@
-using Test
-
-using Random
 using LinearAlgebra
+using Random
 using SparseArrays
+using Test
 
 using AMDGPU
 using CUDA
@@ -16,15 +15,21 @@ const BENCHMARK_DIR = joinpath(dirname(@__FILE__), "..", "benchmark")
 const EXAMPLES_DIR = joinpath(dirname(@__FILE__), "..", "examples")
 const CASES = ["case9.m", "case30.m"]
 
-# ARCHS = Any[(CPU(), Array, SparseMatrixCSC)]
-ARCHS = []
-if CUDA.has_cuda()
+is_package_installed(name::String) = !isnothing(Base.find_package(name))
+
+ARCHS = Any[(CPU(), Array, SparseMatrixCSC)]
+
+test_cuda = CUDA.functional()
+test_rocm = AMDGPU.functional()
+
+# Setup CUDA
+if test_cuda
     using CUDA.CUSPARSE
     CUDA.allowscalar(false)
     CUDA_ARCH = (CUDABackend(), CuArray, CuSparseMatrixCSR)
     push!(ARCHS, CUDA_ARCH)
 end
-if AMDGPU.has_rocm_gpu()
+if test_rocm
     using AMDGPU.rocSPARSE
     AMDGPU.allowscalar(false)
     ROC_ARCH = (ROCBackend(), ROCArray, ROCSparseMatrixCSR)
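
Note on the gating change: `CUDA.functional()` and `AMDGPU.functional()` report whether the whole GPU stack (driver, runtime, device) is actually usable, not merely whether hardware is detected, which is why they replace `CUDA.has_cuda()` / `AMDGPU.has_rocm_gpu()` here. A minimal sketch of how the new pieces could compose, assuming CUDA.jl is an optional dependency; the `if` block below is illustrative and not part of the patch, only `is_package_installed` and the `functional()` call appear in the diff:

    # Resolve a package without loading it (helper defined in the patch).
    is_package_installed(name::String) = !isnothing(Base.find_package(name))

    if is_package_installed("CUDA")
        using CUDA                      # safe: the package is resolvable
        test_cuda = CUDA.functional()   # true only if the CUDA stack is usable
    else
        test_cuda = false               # skip the CUDA test architectures
    end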