Skip to content

Commit

Permalink
Various version bumps (#198)
Browse files Browse the repository at this point in the history
* Various version bumps
* Julia 1.6-1.7, CUDA.jl 3.4
* This requires 'CUDA.Mem.DeviceBuffer' to be fully spelled out as a
parameter type to CuArray
  • Loading branch information
michel2323 authored Aug 26, 2021
1 parent 5b98666 commit 84a7fec
Show file tree
Hide file tree
Showing 6 changed files with 19 additions and 21 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04]
julia-version: ['1.6']
julia-version: ['1.6', '1.7.0-beta3']
julia-arch: [x64]

steps:
Expand All @@ -36,7 +36,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04]
julia-version: ['1.6']
julia-version: ['1.6', '1.7.0-beta3']
julia-arch: [x64]

steps:
Expand Down
8 changes: 3 additions & 5 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ version = "0.5.0"

[deps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
ExprTools = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
Expand All @@ -19,17 +18,16 @@ SparseDiffTools = "47a9eef4-7e08-11e9-0b38-333d64bd3804"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"

[compat]
CUDA = "~2.6, ~3.2, ~3.3"
ExprTools = "= 0.1.3"
CUDA = "3.4"
FiniteDiff = "2.7"
ForwardDiff = "0.10"
KernelAbstractions = "0.6, 0.7"
KernelAbstractions = "0.7"
Krylov = "~0.7.3"
LightGraphs = "1.3"
Metis = "1"
SparseDiffTools = "1"
TimerOutputs = "0.5"
julia = "^1.5"
julia = "1.7"

[extras]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Expand Down
4 changes: 2 additions & 2 deletions src/LinearSolvers/LinearSolvers.jl
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ function DQGMRES(J::AbstractSparseMatrix;
P=BlockJacobiPreconditioner(J), memory=4, verbose=false
)
n, m = size(J)
S = isa(J, CUSPARSE.CuSparseMatrixCSR) ? CuVector{Float64} : Vector{Float64}
S = isa(J, CUSPARSE.CuSparseMatrixCSR) ? CuArray{Float64, 1, CUDA.Mem.DeviceBuffer} : Vector{Float64}
solver = Krylov.DqgmresSolver(n, m, memory, S)
return DQGMRES(solver, P, memory, verbose)
end
Expand Down Expand Up @@ -274,7 +274,7 @@ function KrylovBICGSTAB(J::AbstractSparseMatrix;
P=BlockJacobiPreconditioner(J), verbose=0, rtol=1e-10, atol=1e-10
)
n, m = size(J)
S = isa(J, CUSPARSE.CuSparseMatrixCSR) ? CuVector{Float64} : Vector{Float64}
S = isa(J, CUSPARSE.CuSparseMatrixCSR) ? CuArray{Float64, 1, CUDA.Mem.DeviceBuffer} : Vector{Float64}
solver = Krylov.BicgstabSolver(n, m, S)
return KrylovBICGSTAB(solver, P, verbose, atol, rtol)
end
Expand Down
8 changes: 4 additions & 4 deletions src/LinearSolvers/preconditioners.jl
Original file line number Diff line number Diff line change
Expand Up @@ -54,13 +54,13 @@ struct BlockJacobiPreconditioner{AT,GAT,VI,GVI,MT,GMT,MI,GMI,SMT} <: AbstractPre
SMT = SparseMatrixCSC{Float64,Int64}
elseif isa(device, GPU)
AT = Array{Float64,3}
GAT = CuArray{Float64,3}
GAT = CuArray{Float64, 3, CUDA.Mem.DeviceBuffer}
VI = Vector{Int64}
GVI = CuVector{Int64}
GVI = CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}
MT = Matrix{Float64}
GMT = CuMatrix{Float64}
GMT = CuArray{Float64, 2, CUDA.Mem.DeviceBuffer}
MI = Matrix{Int64}
GMI = CuMatrix{Int64}
GMI = CuArray{Int64, 2, CUDA.Mem.DeviceBuffer}
SMT = CUDA.CUSPARSE.CuSparseMatrixCSR{Float64}
J = SparseMatrixCSC(J)
else
Expand Down
4 changes: 2 additions & 2 deletions src/Polar/polar.jl
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ function PolarForm(pf::PS.PowerNetwork, device::KA.Device)
M = SparseMatrixCSC
AT = Array
elseif isa(device, KA.GPU)
IT = CUDA.CuVector{Int64}
VT = CUDA.CuVector{Float64}
IT = CUDA.CuArray{Int64, 1, CUDA.Mem.DeviceBuffer}
VT = CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}
M = CUSPARSE.CuSparseMatrixCSR
AT = CUDA.CuArray
end
Expand Down
12 changes: 6 additions & 6 deletions src/utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -55,14 +55,14 @@ mutable struct Spmat{VTI<:AbstractVector, VTF<:AbstractVector}
end

mutable struct BatchCuSparseMatrixCSR{Tv} <: CUSPARSE.AbstractCuSparseMatrix{Tv}
rowPtr::CUDA.CuVector{Cint}
colVal::CUDA.CuVector{Cint}
nzVal::CUDA.CuMatrix{Tv}
rowPtr::CUDA.CuArray{Cint, 1, CUDA.Mem.DeviceBuffer}
colVal::CUDA.CuArray{Cint, 1, CUDA.Mem.DeviceBuffer}
nzVal::CUDA.CuArray{Tv, 2, CUDA.Mem.DeviceBuffer}
dims::NTuple{2,Int}
nnz::Cint
nbatch::Int

function BatchCuSparseMatrixCSR{Tv}(rowPtr::CUDA.CuVector{<:Integer}, colVal::CUDA.CuVector{<:Integer},
function BatchCuSparseMatrixCSR{Tv}(rowPtr::CUDA.CuArray{<:Integer, 1, CUDA.Mem.DeviceBuffer}, colVal::CUDA.CuArray{<:Integer, 1, CUDA.Mem.DeviceBuffer},
nzVal::CUDA.CuMatrix, dims::NTuple{2,<:Integer}, nnzJ::Int, nbatch::Int) where Tv
new(rowPtr, colVal, nzVal, dims, nnzJ, nbatch)
end
Expand Down Expand Up @@ -119,8 +119,8 @@ end
# Source code taken from:
# https://github.com/JuliaGPU/CUDA.jl/blob/master/lib/cusolver/wrappers.jl#L78L111
function csclsvqr!(A::CUSPARSE.CuSparseMatrixCSC{Float64},
b::CUDA.CuVector{Float64},
x::CUDA.CuVector{Float64},
b::CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer},
x::CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer},
tol::Float64,
reorder::Cint,
inda::Char)
Expand Down

0 comments on commit 84a7fec

Please sign in to comment.