fix tests if AMDGPU is not available
frapac committed Dec 4, 2023
1 parent 3909c82, commit baed622
Showing 5 changed files with 27 additions and 18 deletions.
test/Polar/TestPolarForm.jl: 2 changes (0 additions, 2 deletions)

@@ -2,8 +2,6 @@ module TestPolarFormulation
 
 using Test
 
-using AMDGPU
-using CUDA
 using FiniteDiff
 using KernelAbstractions
 using LinearAlgebra
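Note: with the unconditional GPU imports gone, this module depends only on backend-agnostic packages; the concrete device and array type are supplied by the test runner as arguments (compare the `test_polar_api(polar, device, M)` signature in the next file). A minimal sketch of that convention, with a hypothetical `test_transfer` for illustration:

using Test
using KernelAbstractions: CPU

# Hypothetical example: the module never names CuArray or ROCArray itself;
# the runner passes in the device and the matching array type M.
function test_transfer(device, M)
    x_host = rand(10)
    x_dev = M(x_host)      # Array on the CPU, CuArray/ROCArray on a GPU
    @test isa(x_dev, M)
end

test_transfer(CPU(), Array)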
test/Polar/api.jl: 3 changes (1 addition, 2 deletions)

@@ -135,8 +135,7 @@ function test_polar_api(polar, device, M)
 
     ## Cost Production
     cost_production = ExaPF.CostFunction(polar)
-    c2 = CUDA.@allowscalar cost_production(stack)[1]
-    @test isa(c2, Real)
+    @test isa(cost_production(stack), M)
     # Test display
     println(devnull, cost_production)
     println(devnull, basis)
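Note: the replaced assertion both referenced the `CUDA` module (no longer guaranteed to be loaded) and read a single element of the result, which counts as scalar indexing on a GPU array and is disallowed once `CUDA.allowscalar(false)` is set; that is why it needed the `CUDA.@allowscalar` escape hatch. Testing the container type against `M` performs no element access and is backend-neutral. A small illustration, assuming CUDA.jl and a GPU are available:

using Test
using CUDA
CUDA.allowscalar(false)

x = CUDA.rand(4)
# x[1]                  # would error: scalar indexing is disallowed
@test isa(x, CuArray)   # passes without touching any element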
test/Polar/second_order.jl: 4 changes (2 additions, 2 deletions)

@@ -62,7 +62,7 @@ function test_hessprod_with_finitediff(polar, device, MT; rtol=1e-6, atol=1e-6)
     proj_fd = zeros(nx+nu)
     mul!(proj_fd, H_fd, tgt)
 
-    if isa(device, ROCBackend)
+    if startswith(string(device), "ROCBackend")
         @test_broken myisapprox(projp, proj_fd, rtol=rtol)
     else
         @test myisapprox(projp, proj_fd, rtol=rtol)
@@ -121,7 +121,7 @@ function test_full_space_hessian(polar, device, MT)
     Hd = FiniteDiff.finite_difference_jacobian(grad_fd_x, x)
 
     # Test that both Hessian match
-    if isa(device, ROCBackend)
+    if startswith(string(device), "ROCBackend")
         @test_broken myisapprox(Hd, H, rtol=1e-5)
     else
         @test myisapprox(Hd, H, rtol=1e-5)
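Note: `isa(device, ROCBackend)` can only be evaluated when the `ROCBackend` type is defined, i.e. after `using AMDGPU`; on a machine without the package the check itself throws `UndefVarError`. Matching the device's printed name keeps the `@test_broken` carve-out for ROCm without the hard dependency. Sketch, assuming only KernelAbstractions is loaded:

using KernelAbstractions: CPU

device = CPU()
startswith(string(device), "ROCBackend")    # false; needs no AMDGPU
# isa(device, ROCBackend)                   # UndefVarError unless AMDGPU is loaded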
test/quickstart.jl: 4 changes (1 addition, 3 deletions)

@@ -1,6 +1,4 @@
 using Test
-using AMDGPU
-using CUDA
 using KernelAbstractions
 
 using ExaPF
@@ -72,7 +70,7 @@ const LS = ExaPF.LinearSolvers
 @test convergence.n_iterations == 5
 @test convergence.norm_residuals <= pf_algo.tol
 
-if CUDA.has_cuda_gpu()
+if test_cuda
     polar_gpu = ExaPF.PolarForm(pf, CUDABackend())
     stack_gpu = ExaPF.NetworkStack(polar_gpu)
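Note: `test_cuda` is the `Bool` computed in test/runtests.jl, which includes this file, so the guard no longer evaluates any CUDA name when the package is absent; `CUDABackend()` inside the branch is only reached after runtests.jl has already done `using CUDA`. A sketch of the failure mode this avoids:

# With CUDA.jl never loaded, even evaluating the old guard throws:
# CUDA.has_cuda_gpu()    # UndefVarError: CUDA not defined
# A plain Bool defined by the runner is always safe to test:
test_cuda = false        # set in runtests.jl
if test_cuda
    # GPU-only section, reached only when CUDA.jl is loaded and a GPU exists
end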
test/runtests.jl: 32 changes (23 additions, 9 deletions)

@@ -1,11 +1,9 @@
-using Test
-
-using Random
 using LinearAlgebra
+using Pkg
+using Random
 using SparseArrays
+using Test
 
-using AMDGPU
-using CUDA
 using KernelAbstractions
 
 using ExaPF
@@ -16,15 +14,31 @@ const BENCHMARK_DIR = joinpath(dirname(@__FILE__), "..", "benchmark")
 const EXAMPLES_DIR = joinpath(dirname(@__FILE__), "..", "examples")
 const CASES = ["case9.m", "case30.m"]
 
-# ARCHS = Any[(CPU(), Array, SparseMatrixCSC)]
-ARCHS = []
-if CUDA.has_cuda()
+is_package_installed(name::String) = !isnothing(Base.find_package(name))
+
+ARCHS = Any[(CPU(), Array, SparseMatrixCSC)]
+
+test_cuda = if is_package_installed("CUDA")
+    using CUDA
+    CUDA.has_cuda_gpu()
+else
+    false
+end
+test_rocm = if is_package_installed("AMDGPU")
+    using AMDGPU
+    AMDGPU.has_rocm_gpu()
+else
+    false
+end
+
+# Setup CUDA
+if test_cuda
     using CUDA.CUSPARSE
     CUDA.allowscalar(false)
     CUDA_ARCH = (CUDABackend(), CuArray, CuSparseMatrixCSR)
     push!(ARCHS, CUDA_ARCH)
 end
-if AMDGPU.has_rocm_gpu()
+if test_rocm
     using AMDGPU.rocSPARSE
     AMDGPU.allowscalar(false)
     ROC_ARCH = (ROCBackend(), ROCArray, ROCSparseMatrixCSR)
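Note on the pattern: `Base.find_package(name)` returns the package's entry-point path when it is available in the current environment and `nothing` otherwise, so `is_package_installed` checks availability without loading anything. The guarded `using CUDA` / `using AMDGPU` then only run when the package exists, and a backend triple is pushed onto `ARCHS` only when a device is actually usable. A hedged sketch of how the `(backend, array type, sparse matrix type)` triples are typically consumed downstream (this loop is illustrative, not part of the commit):

using Test

for (device, AT, SMT) in ARCHS
    @testset "ExaPF tests on $device" begin
        # each test file receives the triple and stays backend-agnostic,
        # e.g. test_polar_api(polar, device, AT)
    end
end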
