From f2567445b94930c11e4ead6a498c001b64bdf470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Pacaud?= Date: Fri, 23 Jul 2021 11:38:33 -0500 Subject: [PATCH] Remove all Evaluators from ExaPF (#191) * remove Evaluators from ExaPF * LS: add explicit rdiv! for transpose solve on GPU * update documentation and remove deprecated scripts * remove cusolverRF from tests --- .github/workflows/action.yml | 2 - Project.toml | 2 - README.md | 45 +- benchmark/batch_hessian.jl | 87 --- benchmark/benchmarks.jl | 19 +- benchmark/evaluators.jl | 175 ------ deps/deps.jl | 5 - docs/make.jl | 8 +- docs/src/index.md | 14 +- docs/src/lib/evaluators.md | 98 ---- docs/src/man/evaluators.md | 238 -------- scripts/dommel.jl | 228 -------- scripts/ffwu.jl | 300 ---------- src/Evaluators/Evaluators.jl | 255 --------- src/Evaluators/MOI_wrapper.jl | 106 ---- src/Evaluators/auglag.jl | 201 ------- src/Evaluators/bridge_evaluator.jl | 151 ------ src/Evaluators/common.jl | 163 ------ src/Evaluators/derivatives.jl | 0 src/Evaluators/feasibility_evaluator.jl | 106 ---- src/Evaluators/optimizers.jl | 67 --- src/Evaluators/penalty.jl | 53 -- src/Evaluators/proxal_evaluators.jl | 341 ------------ src/Evaluators/reduced_evaluator.jl | 694 ------------------------ src/Evaluators/slack_evaluator.jl | 279 ---------- src/ExaPF.jl | 6 +- src/LinearSolvers/LinearSolvers.jl | 10 + src/Polar/polar.jl | 8 +- src/architectures.jl | 5 +- test/Evaluators/MOI_wrapper.jl | 55 -- test/Evaluators/TestEvaluators.jl | 95 ---- test/Evaluators/api.jl | 184 ------- test/Evaluators/auglag.jl | 72 --- test/Evaluators/interface.jl | 96 ---- test/Evaluators/powerflow.jl | 21 - test/Evaluators/proxal_evaluator.jl | 84 --- test/Evaluators/test_rgm.jl | 47 -- test/Polar/TestPolarForm.jl | 3 +- test/Polar/api.jl | 22 + test/cusolver.jl | 28 - test/runtests.jl | 21 +- 41 files changed, 66 insertions(+), 4328 deletions(-) delete mode 100644 benchmark/batch_hessian.jl delete mode 100644 benchmark/evaluators.jl delete mode 100644 deps/deps.jl delete mode 100644 docs/src/lib/evaluators.md delete mode 100644 docs/src/man/evaluators.md delete mode 100644 scripts/dommel.jl delete mode 100644 scripts/ffwu.jl delete mode 100644 src/Evaluators/Evaluators.jl delete mode 100644 src/Evaluators/MOI_wrapper.jl delete mode 100644 src/Evaluators/auglag.jl delete mode 100644 src/Evaluators/bridge_evaluator.jl delete mode 100644 src/Evaluators/common.jl delete mode 100644 src/Evaluators/derivatives.jl delete mode 100644 src/Evaluators/feasibility_evaluator.jl delete mode 100644 src/Evaluators/optimizers.jl delete mode 100644 src/Evaluators/penalty.jl delete mode 100644 src/Evaluators/proxal_evaluators.jl delete mode 100644 src/Evaluators/reduced_evaluator.jl delete mode 100644 src/Evaluators/slack_evaluator.jl delete mode 100644 test/Evaluators/MOI_wrapper.jl delete mode 100644 test/Evaluators/TestEvaluators.jl delete mode 100644 test/Evaluators/api.jl delete mode 100644 test/Evaluators/auglag.jl delete mode 100644 test/Evaluators/interface.jl delete mode 100644 test/Evaluators/powerflow.jl delete mode 100644 test/Evaluators/proxal_evaluator.jl delete mode 100644 test/Evaluators/test_rgm.jl delete mode 100644 test/cusolver.jl diff --git a/.github/workflows/action.yml b/.github/workflows/action.yml index 2a2c7aca..049430aa 100644 --- a/.github/workflows/action.yml +++ b/.github/workflows/action.yml @@ -22,7 +22,6 @@ jobs: - uses: julia-actions/setup-julia@latest with: version: ${{ matrix.julia-version }} - - run: julia --project deps/deps.jl - uses: 
julia-actions/julia-buildpkg@latest - uses: julia-actions/julia-runtest@latest @@ -44,6 +43,5 @@ jobs: with: version: ${{ matrix.julia-version }} arch: ${{ matrix.julia-arch }} - - run: julia --project deps/deps.jl - uses: julia-actions/julia-buildpkg@latest - uses: julia-actions/julia-runtest@latest diff --git a/Project.toml b/Project.toml index 90d3766b..65d72f50 100644 --- a/Project.toml +++ b/Project.toml @@ -12,7 +12,6 @@ KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" LightGraphs = "093fc24a-ae57-5d10-9952-331d41423f4d" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -27,7 +26,6 @@ ForwardDiff = "0.10" KernelAbstractions = "0.6, 0.7" Krylov = "~0.7.3" LightGraphs = "1.3" -MathOptInterface = "0.9" Metis = "1" SparseDiffTools = "1" TimerOutputs = "0.5" diff --git a/README.md b/README.md index af55ec9b..8006596d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ExaPF -[![][docs-latest-img]][docs-latest-url] ![CI](https://github.com/exanauts/ExaPF.jl/workflows/Run%20tests/badge.svg?branch=master) +[![][docs-latest-img]][docs-latest-url] ![CI](https://github.com/exanauts/ExaPF.jl/workflows/Run%20tests/badge.svg?branch=master) [docs-latest-img]: https://img.shields.io/badge/docs-latest-blue.svg [docs-latest-url]: https://exanauts.github.io/ExaPF.jl/ @@ -49,49 +49,6 @@ Iteration 4. Residual norm: 3.9111e-12. ExaPF.ConvergenceStatus(true, 4, 3.911102241031109e-12, 0) ``` -### How to solve the optimal power flow in the reduced space? - -ExaPF implements a wrapper to [MathOptInterface](https://github.com/jump-dev/MathOptInterface.jl) -that allows to solve the optimal power flow problem directly in the reduced space -induced by the power flow equations: - -```julia -julia> case = "case57.m" -# Instantiate a ReducedSpaceEvaluator object -julia> nlp = ExaPF.ReducedSpaceEvaluator(datafile) -# MOI optimizer -julia> optimizer = Ipopt.Optimizer() -# Use LBFGS algorithm, as reduced Hessian is not available by default! -julia> MOI.set(optimizer, MOI.RawParameter("hessian_approximation"), "limited-memory") -julia> MOI.set(optimizer, MOI.RawParameter("tol"), 1e-4) -julia> solution = ExaPF.optimize!(optimizer, nlp) -Total number of variables............................: 10 - variables with only lower bounds: 0 - variables with lower and upper bounds: 10 - variables with only upper bounds: 0 -Total number of equality constraints.................: 0 -Total number of inequality constraints...............: 58 - inequality constraints with only lower bounds: 0 - inequality constraints with lower and upper bounds: 58 - inequality constraints with only upper bounds: 0 - - -Number of Iterations....: 9 - - (scaled) (unscaled) -Objective...............: 1.9630480251946040e+03 3.7589338203438238e+04 -Dual infeasibility......: 2.5545890554923290e-05 4.8916435433709606e-04 -Constraint violation....: 4.7695181137896725e-13 4.7695181137896725e-13 -Complementarity.........: 1.0270912626531211e-11 1.9667211572084318e-10 -Overall NLP error.......: 2.5545890554923290e-05 4.8916435433709606e-04 - -[...] -Total CPU secs in IPOPT (w/o function evaluations) = 0.049 -Total CPU secs in NLP function evaluations = 0.023 - -EXIT: Optimal Solution Found. -``` - ## Development We welcome any contribution to ExaPF! 
Bug fixes or feature requests diff --git a/benchmark/batch_hessian.jl b/benchmark/batch_hessian.jl deleted file mode 100644 index d19eb9da..00000000 --- a/benchmark/batch_hessian.jl +++ /dev/null @@ -1,87 +0,0 @@ -using LinearAlgebra -using SparseArrays -using KernelAbstractions -using BenchmarkTools - -using ExaPF -import ExaPF: LinearSolvers - -const LS = LinearSolvers - -function build_batch_nlp(datafile, device, nbatch) - print("Load data\t") - polar = @time PolarForm(datafile, device) - - constraints = Function[ - ExaPF.voltage_magnitude_constraints, - ExaPF.active_power_constraints, - ExaPF.reactive_power_constraints, - ] - powerflow_solver = NewtonRaphson(tol=1e-10) - - J = ExaPF.jacobian_sparsity(polar, ExaPF.power_balance, State()) - if isa(device, CPU) - lufac = lu(J) - linear_solver = LS.DirectSolver(lufac) - if nbatch == 1 - hessian_lagrangian = ExaPF.HessianLagrangian(polar, lufac, lufac') - else - hessian_lagrangian = ExaPF.BatchHessianLagrangian(polar, lufac, lufac', nbatch) - end - else - gJ = CuSparseMatrixCSR(J) - lufac = CUSOLVERRF.CusolverRfLU(gJ) - linear_solver = LS.DirectSolver(lufac) - if nbatch == 1 - blufac = CUSOLVERRF.CusolverRfLU(gJ) - badjlu = CUSOLVERRF.CusolverRfLU(CuSparseMatrixCSC(J)) - hessian_lagrangian = ExaPF.HessianLagrangian(polar, blufac, badjlu) - else - blufac = CUSOLVERRF.CusolverRfLUBatch(gJ, nbatch) - badjlu = CUSOLVERRF.CusolverRfLUBatch(CuSparseMatrixCSC(J), nbatch) - hessian_lagrangian = ExaPF.BatchHessianLagrangian(polar, blufac, badjlu, nbatch) - end - end - - nlp = @time ExaPF.ReducedSpaceEvaluator(polar; constraints=constraints, - linear_solver=linear_solver, - powerflow_solver=powerflow_solver, - hessian_lagrangian=hessian_lagrangian) - return nlp -end - -function run_batch_hessian(nlp) - @assert !isnothing(nlp.hesslag) - nbatch = ExaPF.n_batches(nlp.hesslag) - # Update nlp to stay on manifold - u = ExaPF.initial(nlp) - n = ExaPF.n_variables(nlp) - print("Update \t") - @time ExaPF.update!(nlp, u) - # Compute objective - print("Objective\t") - c = @btime ExaPF.objective($nlp, $u) - # Compute gradient of objective - g = similar(u) - fill!(g, 0) - print("Gradient \t") - @btime ExaPF.gradient!($nlp, $g, $u) - - if nbatch == 1 - hv = similar(u) ; fill!(hv, 0) - v = similar(u) ; fill!(v, 0) - v[1] = 1.0 - else - hv = similar(u, n, nbatch) ; fill!(hv, 0) - v = similar(u, n, nbatch) ; fill!(v, 0) - v[1, :] .= 1.0 - end - print("Hessprod \t") - @btime ExaPF.hessprod_!($nlp, $hv, $u, $v) - y = similar(nlp.g_min) ; fill!(y, 1.0) - w = similar(nlp.g_min) ; fill!(w, 1.0) - print("HLagPen-prod \t") - @btime ExaPF.hessian_lagrangian_penalty_prod_!($nlp, $hv, $u, $y, 1.0, $v, $w) - return -end - diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 88bbdcfa..caab5da8 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -53,15 +53,18 @@ function run_benchmark(datafile, device, linsolver) algo = linsolver(J; P=precond) powerflow_solver = NewtonRaphson(tol=ntol) - nlp = ExaPF.ReducedSpaceEvaluator(polar; - powerflow_solver=powerflow_solver) - nlp.linear_solver = algo - convergence = ExaPF.update!(nlp, u0) - ExaPF.reset!(nlp) - convergence = ExaPF.update!(nlp, u0) - ExaPF.reset!(nlp) + + # Init variables + buffer = get(polar, ExaPF.PhysicalState()) + jx = AutoDiff.Jacobian(polar, ExaPF.power_balance, State()) + + # Warmstart + ExaPF.init_buffer!(polar, buffer) + ExaPF.powerflow(polar, jx, buffer, powerflow_solver; linear_solver=algo) + TimerOutputs.reset_timer!(ExaPF.TIMER) - convergence = ExaPF.update!(nlp, u0) + 
ExaPF.init_buffer!(polar, buffer) + convergence = ExaPF.powerflow(polar, jx, buffer, powerflow_solver; linear_solver=algo) # Make sure we are converged @assert(convergence.has_converged) diff --git a/benchmark/evaluators.jl b/benchmark/evaluators.jl deleted file mode 100644 index b3bd411a..00000000 --- a/benchmark/evaluators.jl +++ /dev/null @@ -1,175 +0,0 @@ -using LinearAlgebra -using SparseArrays -using KernelAbstractions -using BenchmarkTools - -using ExaPF -import ExaPF: LinearSolvers - -const LS = LinearSolvers - -function build_nlp(datafile, device) - print("Load data\t") - polar = @time PolarForm(datafile, device) - - constraints = Function[ - ExaPF.voltage_magnitude_constraints, - ExaPF.active_power_constraints, - ExaPF.reactive_power_constraints, - ] - print("Constructor\t") - powerflow_solver = NewtonRaphson(tol=1e-10) - nlp = @time ExaPF.ReducedSpaceEvaluator(polar; constraints=constraints, - powerflow_solver=powerflow_solver) - return nlp -end - -function run_reduced_evaluator(nlp, u; device=CPU()) - n = length(u) - # Update nlp to stay on manifold - print("Update \t") - @time ExaPF.update!(nlp, u) - # Compute objective - print("Objective\t") - c = @btime ExaPF.objective($nlp, $u) - # Compute gradient of objective - g = similar(u) - fill!(g, 0) - print("Gradient \t") - @btime ExaPF.gradient!($nlp, $g, $u) - - # Constraint - ## Evaluation of the constraints - cons = similar(nlp.g_min) - fill!(cons, 0) - print("Constrt \t") - @btime ExaPF.constraint!($nlp, $cons, $u) - - print("Jac-prod \t") - jv = copy(g) ; fill!(jv, 0) - v = copy(cons) ; fill!(v, 1) - @btime ExaPF.jtprod!($nlp, $jv, $u, $v) - hv = similar(u) ; fill!(hv, 0) - v = similar(u) ; fill!(v, 0) - v[1] = 1 - print("Hessprod \t") - @btime ExaPF.hessprod!($nlp, $hv, $u, $v) - y = similar(cons) ; fill!(y, 1.0) - w = similar(cons) ; fill!(w, 1.0) - print("HLagPen-prod \t") - @btime ExaPF.hessian_lagrangian_penalty_prod!($nlp, $hv, $u, $y, 1.0, $w, $v) - print("Hessian \t") - H = similar(u, n, n) - @btime ExaPF.hessian!($nlp, $H, $u) - return -end - -function run_slack(nlp, u; device=CPU()) - slack = ExaPF.SlackEvaluator(nlp) - w = ExaPF.initial(slack) - print("Update \t") - @time ExaPF.update!(slack, w) - print("Objective\t") - c = @time ExaPF.objective(slack, w) - g = similar(w) - fill!(g, 0) - print("Gradient \t") - @time ExaPF.gradient!(slack, g, w) - cons = similar(nlp.g_min) - fill!(cons, 0) - print("Constrt \t") - @time ExaPF.constraint!(slack, cons, w) - - hv = similar(w) ; fill!(hv, 0) - v = similar(w) ; fill!(v, 1) - print("Hessprod \t") - @time ExaPF.hessprod!(slack, hv, w, v) - y = similar(cons) ; fill!(y, 1.0) - z = similar(cons) ; fill!(w, 1.0) - print("HLagPen-prod \t") - @time ExaPF.hessian_lagrangian_penalty_prod!(slack, hv, w, y, 1.0, v, z) - return -end - -function run_penalty(nlp, u; device=CPU()) - pen = ExaPF.AugLagEvaluator(nlp, u) - print("Update \t") - @time ExaPF.update!(pen, u) - print("Objective\t") - c = @time ExaPF.objective(pen, u) - g = similar(u) - fill!(g, 0) - print("Gradient \t") - @time ExaPF.gradient!(pen, g, u) - hv = similar(u) ; fill!(hv, 0) - v = similar(u) ; fill!(v, 1) - print("Hessprod \t") - @time ExaPF.hessprod!(pen, hv, u, v) - return -end - -function run_slackaug(nlp, u; device=CPU()) - nlp2 = ExaPF.SlackEvaluator(nlp) - w = ExaPF.initial(nlp2) - pen = ExaPF.AugLagEvaluator(nlp2, w) - print("Update \t") - @time ExaPF.update!(pen, w) - print("Objective\t") - c = @time ExaPF.objective(pen, w) - g = similar(w) - fill!(g, 0) - print("Gradient \t") - @time ExaPF.gradient!(pen, g, 
w) - hv = similar(w) ; fill!(hv, 0) - v = similar(w) ; fill!(v, 1) - print("Hessprod \t") - @time ExaPF.hessprod!(pen, hv, w, v) - return -end - -function run_proxal(nlp, u; device=CPU()) - pen = ExaPF.ProxALEvaluator(nlp, ExaPF.Normal) - w = ExaPF.initial(pen) - print("Update \t") - @time ExaPF.update!(pen, w) - print("Objective\t") - c = @time ExaPF.objective(pen, w) - g = similar(w) - fill!(g, 0) - print("Gradient \t") - @time ExaPF.gradient!(pen, g, w) - return -end - -function run_proxaug(nlp, u; device=CPU()) - pen = ExaPF.ProxALEvaluator(nlp, ExaPF.Normal) - w = ExaPF.initial(pen) - aug = ExaPF.AugLagEvaluator(pen, w) - print("Update \t") - @time ExaPF.update!(aug, w) - print("Objective\t") - c = @time ExaPF.objective(aug, w) - g = similar(w) - fill!(g, 0) - print("Gradient \t") - @time ExaPF.gradient!(aug, g, w) - return -end - -datafile = joinpath(dirname(@__FILE__), "..", "data", "case300.m") -device = CPU() -nlp = build_nlp(datafile, CPU()) -u = ExaPF.initial(nlp) - -@info("ReducedSpaceEvaluator") -run_reduced_evaluator(nlp, u, device=CPU()) -ExaPF.reset!(nlp) -# @info("SlackEvaluator") -# run_slack(nlp, u, device=CPU()) -# ExaPF.reset!(nlp) -# @info("AugLagEvaluator") -# run_penalty(nlp, u, device=CPU()) -# ExaPF.reset!(nlp) -# @info("SlackAugLagEvaluator") -# run_slackaug(nlp, u, device=CPU()) -# ExaPF.reset!(nlp) diff --git a/deps/deps.jl b/deps/deps.jl deleted file mode 100644 index 1c1bfaa8..00000000 --- a/deps/deps.jl +++ /dev/null @@ -1,5 +0,0 @@ -# This file pulls the dependencies using git branches for development - -using Pkg -cusolverrf = PackageSpec(url="https://github.com/exanauts/BlockPowerFlow.jl.git", rev="master") -Pkg.add([cusolverrf]) diff --git a/docs/make.jl b/docs/make.jl index 7810988c..7fd6d683 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,8 +1,8 @@ using Pkg -Pkg.develop(PackageSpec(path=joinpath(dirname(@__FILE__), ".."))) -# when first running instantiate -Pkg.instantiate() +# Pkg.develop(PackageSpec(path=joinpath(dirname(@__FILE__), ".."))) +# # when first running instantiate +# Pkg.instantiate() using Documenter, ExaPF @@ -25,14 +25,12 @@ makedocs( "Linear Solver" => "man/linearsolver.md", "PowerSystem" => "man/powersystem.md", "Formulations" => "man/formulations.md", - "Evaluators" => "man/evaluators.md", ], "Library" => [ "AutoDiff" => "lib/autodiff.md", "Linear Solver" => "lib/linearsolver.md", "PowerSystem" => "lib/powersystem.md", "Formulations" => "lib/formulations.md", - "Evaluators" => "lib/evaluators.md", ] ] ) diff --git a/docs/src/index.md b/docs/src/index.md index d0dfaea9..43ea0eb2 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,14 +1,14 @@ # ExaPF [`ExaPF.jl`](https://github.com/exanauts/ExaPF.jl) is a -package to solve the power flow problem on upcoming exascale architectures. +package to solve the power flow problem on upcoming exascale architectures. On these architectures the computational performance can only be achieved through graphics processing units (GPUs) as these systems lack substantial computational performance through classical CPUs. [`ExaPF.jl`](https://github.com/exanauts/ExaPF.jl) aims to provide the sensitivity information required for a reduced space optimization method, and enabling the computation of the optimal power flow problem (OPF) fully on GPUs. Reduced space methods enforce the constraints, represented here by the power flow's (PF) system of nonlinear equations, separately at each -iteration of the optimization in the reduced space. +iteration of the optimization in the reduced space. 
This includes the computation of second-order derivatives using automatic differentiation, an iterative linear solver with a preconditioner, and a Newton-Raphson implementation. All of these steps allow us to run the main @@ -18,13 +18,11 @@ We leverage the packages [`CUDA.jl`](https://github.com/JuliaGPU/CUDA.jl) and [` [autodiff](man/autodiff.md) and [linear solver](man/linearsolver.md) illustrate the design overview of [`ExaPF.jl`](https://github.com/exanauts/ExaPF.jl) targeted for GPUs. -The user API is separated into three layers: +The user API is separated into two layers: -1. First layer: Physical layer, specify the power network topology in [powersystem](man/powersystem.md) -2. Second layer: Interface between power network and NLE or NLP in [formulations](lib/formulations.md) -3. Third layer: Evaluators for nonlinear problems +1. First layer: Physical layer, specify the power network topology in [powersystem](man/powersystem.md). The first layer provides the physical properties at the electrical engineering level. +2. Second layer: Mathematical layer, using a [polar formulation](lib/formulations.md) to model the equations of the network. -The third layer is for numerical optimization whereas the first layer provides the physical properties at the electrical engineering level. ## Table of contents @@ -44,7 +42,6 @@ Pages = [ "man/linearsolver.md", "man/powersystem.md", "man/formulations.md", - "man/evaluators.md", ] Depth = 1 ``` @@ -57,7 +54,6 @@ Pages = [ "lib/linearsolver.md", "lib/powersystem.md", "lib/formulations.md", - "lib/evaluators.md", ] Depth = 1 ``` diff --git a/docs/src/lib/evaluators.md b/docs/src/lib/evaluators.md deleted file mode 100644 index 90ceb9c2..00000000 --- a/docs/src/lib/evaluators.md +++ /dev/null @@ -1,98 +0,0 @@ -```@meta -CurrentModule = ExaPF -``` - -# Evaluators - -## Description - -```@docs -AbstractNLPEvaluator -``` - - -## API Reference - -### Optimization -```@docs -optimize! -``` - -### Attributes -```@docs -Variables -Constraints -n_variables -n_constraints -constraints_type - -``` - -### Utilities - -```@docs -reset! -primal_infeasibility -primal_infeasibility! -``` - -## Callbacks - -### Objective - -```@docs -gradient! -hessprod! -hessian! - -``` - -### Constraints - -```@docs -constraint! -jacobian_structure! -jacobian! -jprod! -jtprod! -ojtprod! -``` - -### Second-order - -```@docs -hessian_lagrangian_penalty_prod! - -``` - -## ReducedSpaceEvaluator -When working in the reduced space, we could use -the corresponding `ReducedSpaceEvaluator`: -```@docs -ReducedSpaceEvaluator -``` - -## SlackEvaluator -```@docs -SlackEvaluator -``` - -## AugLagEvaluator - -```@docs -AugLagEvaluator -``` - -## MOIEvaluator -The bridge to MathOptInterface is encoded by -the `MOIEvaluator` structure: -```@docs -MOIEvaluator -``` - -## ProxALEvaluator - -```@docs -ProxALEvaluator -``` - diff --git a/docs/src/man/evaluators.md b/docs/src/man/evaluators.md deleted file mode 100644 index fc7164dc..00000000 --- a/docs/src/man/evaluators.md +++ /dev/null @@ -1,238 +0,0 @@ -# Evaluators - -In `ExaPF.jl`, the evaluators are the final layer of the structure. -They take as input a given [`ExaPF.AbstractFormulation`](@ref) and implement the -callbacks for the optimization solvers. 
- -## Overview of the AbstractNLPEvaluator - -An [`ExaPF.AbstractNLPEvaluator`](@ref) implements an optimization problem -associated with an underlying [`ExaPF.AbstractFormulation`](@ref): -```math -\begin{aligned} -\min_{u \in \mathbb{R}^n} \; & f(u) \\ -\text{subject to} \quad & g(u) = 0 \\ - & h(u) \leq 0. -\end{aligned} -``` -with $f: \mathbb{R}^n \to \mathbb{R}$ the objective function, -$g: \mathbb{R}^n \to \mathbb{R}^{m_E}$ non-linear equality constraints and -$h: \mathbb{R}^n \to \mathbb{R}^{m_I}$ non-linear inequality constraints. - -### Callbacks - -Most non-linear optimization algorithms rely on *callbacks* to pass -information about the structure of the problem to the optimizer. -In `ExaPF`, the implementation of the evaluators allows to have a proper splitting -between the model (formulated in the [`ExaPF.AbstractFormulation`](@ref) layer) -and the optimization algorithms. By design, the implementation -of an [`ExaPF.AbstractNLPEvaluator`](@ref) shares a similar spirit with the implementations -introduced in other packages, as - -- MathOptInterface.jl's [AbstractNLPEvaluator](https://jump.dev/MathOptInterface.jl/stable/apireference/#MathOptInterface.AbstractNLPEvaluator) -- NLPModels' [AbstractNLPModel](https://juliasmoothoptimizers.github.io/NLPModels.jl/stable/api/#AbstractNLPModel-functions) - -Internally, the evaluator caches all the information needed to evaluate -the callbacks (e.g. the polar representation of the problem, with voltage -magnitudes and angles). This cache allows to reduce the number of memory allocations to -its minimum. -Once a new variable $u$ passed to the evaluator -a function `ExaPF.update!` is being called to update the cache, -according to the model specified in the underlying [`ExaPF.AbstractFormulation`](@ref). -Denoting by `nlp` an instance of AbstractNLPEvaluator, the cache is -updated via -```julia-repl -julia> ExaPF.update!(nlp, u) -``` - -Once the internal structure updated, we are ready to call the different -callbacks, in every order. For instance, computing the objective, the -gradient and the constraints amounts to -```julia-repl -# Objective -julia> obj = ExaPF.objective(nlp, u) -# Gradient -julia> g = zeros(n_variables(nlp)) -julia> ExaPF.gradient!(nlp, g, u) -# Constraints -julia> cons = zeros(n_constraints(nlp)) -julia> ExaPF.constraint!(nlp, cons, u) - -``` - - -## A journey to the reduced space with the ReducedSpaceEvaluator - -When we aim at optimizing the problem directly in the powerflow -manifold, the [`ExaPF.ReducedSpaceEvaluator`](@ref) is our workhorse. -We recall that the powerflow manifold is defined implicitly by the -powerflow equations: -```math - g(x(u), u) = 0. -``` -By design, the [`ExaPF.ReducedSpaceEvaluator`](@ref) works in the reduced -space $(x(u), u)$. Hence, the reduced optimization problem writes out -```math -\begin{aligned} -\min_{u \in \mathbb{R}^n} \; & f(x(u), u) \\ -\text{subject to} \quad & h(x(u), u) \leq 0. -\end{aligned} -``` -This formulation comes with two advantages: - -- if the dimension of the state is large, the reduced problem has - a lower dimension. -- the powerflow equality constraints $g(x, u) = 0$ disappear in the reduced problem. 
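To make the dimension argument concrete, here is a minimal sketch, assuming the `case9.m` file bundled with ExaPF and the default set of constraints (the sizes shown below match the evaluator printout given further down for the same case):

```julia-repl
julia> datafile = "case9.m"
julia> nlp = ExaPF.ReducedSpaceEvaluator(datafile)
julia> ExaPF.n_variables(nlp)   # only the control u is exposed to the optimizer
5
julia> ExaPF.n_constraints(nlp) # the powerflow equalities g(x, u) = 0 are eliminated,
                                # leaving only the inequality constraints h(x(u), u)
10
```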
- - -### Playing with the ReducedSpaceEvaluator - -#### Constructor -To create a [`ExaPF.ReducedSpaceEvaluator`](@ref), we just need a polar formulation -`polar::PolarForm`: -```julia-repl -julia> nlp = ExaPF.ReducedSpaceEvaluator(polar) - -``` -or we could alternatively instantiate the evaluator directly from -a MATPOWER (or PSSE) instance: -```julia-repl -julia> datafile = "case9.m" -julia> nlp = ExaPF.ReducedSpaceEvaluator(datafile) -A ReducedSpaceEvaluator object - * device: KernelAbstractions.CPU() - * #vars: 5 - * #cons: 10 - * constraints: - - voltage_magnitude_constraints - - active_power_constraints - - reactive_power_constraints - * linear solver: ExaPF.LinearSolvers.DirectSolver() - -``` - -Let's describe the output of the last command. - -* `device: KernelAbstractions.CPU()`: the evaluator is instantiated on the CPU ; -* `#vars: 5`: it has 5 optimization variables ; -* `#cons: 10`: and 10 inequality constraints ; -* `constraints`: by default, `nlp` comes with three inequality constraints: `voltage_magnitude_constraints` (specifying the bounds $x_L \leq x(u) \leq x_U$ on the state $x$), `active_power_constraints` and `reactive_power_constraints` (bounding the active and reactive power of the generators) ; -* `linear solver`: [`ExaPF.LinearSolvers.DirectSolver`](@ref): to solve the linear systems, the evaluator uses a direct linear algebra solver. - -Of course, these settings are only specified by default. The user is free -to choose the parameters she wants. For instance, - -* We could remove all constraints by passing an empty array of constraints - to the evaluator: - ```julia-repl - julia> constraints = Function[] - julia> nlp = ExaPF.ReducedSpaceEvaluator(datafile; constraints=constraints) - ``` -* We could load the evaluator on the GPU simply by changing the `device` option: - ```julia-repl - julia> nlp = ExaPF.ReducedSpaceEvaluator(datafile; device=CUDADevice()) - ``` - - - -#### Caching - -To juggle between the mathematical description (characterized -by a state $x$ and a control $u$) and the physical description (characterized -by the voltage and power injection at each bus), the evaluator `nlp` -stores internally a cache `nlp.buffer`, with type `ExaPF.AbstractNetworkBuffer`. -```julia-repl -julia> buffer = get(nlp, ExaPF.PhysicalState()) -``` - -#### Evaluation of the callbacks - -Now that we have a `nlp` evaluator available, we can embed it in any -optimization routine. For instance, suppose we have a new control `uk` -available. First, we need to find the corresponding state `xk`, -such that ``g(x_k, u_k) = 0``. -In the evaluator's API, this sums up to: -```julia-repl -ExaPF.update!(nlp, uk) - -``` -The function `update!` will -- Feed the physical description `nlp.buffer` with the values stored in the new control `uk`. -- Solve the powerflow equations corresponding to the formulation specified in `form`. This operation - updates the cache `nlp.buffer` inplace. - -Once the function `update!` called (and only after that), we can evaluate -all the different callbacks, independently of one other. 
- -* Objective - ```julia-repl - julia> cost = ExaPF.objective(nlp, uk) - ``` -* Objective's gradient - ```julia-repl - julia> g = zeros(n_variables(nlp)) - julia> ExaPF.gradient!(nlp, g, uk) - ``` -* Constraints - ```julia-repl - # Evaluate constraints - julia> cons = zeros(n_constraints(nlp)) - julia> ExaPF.constraint!(nlp, cons, uk) - ``` -* Constraints' Jacobian - ```julia-repl - ## Evaluate Jacobian - julia> ExaPF.jacobian!(nlp, jac, uk) - ``` -* Constraints' Jacobian-vector product: - ```julia-repl - ## Evaluate Jacobian-vector product - julia> v = zeros(n_variables(nlp)) - julia> jv = zeros(n_constraints(nlp)) - julia> ExaPF.jprod!(nlp, jv, uk, v) - ``` -* Constraints' transpose Jacobian-vector product - ```julia-repl - ## Evaluate transpose Jacobian-vector product - julia> v = zeros(n_constraints(nlp)) - julia> jv = zeros(n_variables(nlp)) - julia> ExaPF.jtprod!(nlp, jv, uk, v) - ``` -* Hessian-vector product: - ```julia-repl - ## Evaluate transpose Jacobian-vector product - julia> v = zeros(n_variables(nlp)) - julia> hv = zeros(n_variables(nlp)) - julia> ExaPF.hessprod!(nlp, hv, uk, v) - ``` -* Hessian: - ```julia-repl - ## Evaluate transpose Jacobian-vector product - julia> H = zeros(n_variables(nlp), n_variables(nlp)) - julia> ExaPF.hessprod!(nlp, H, uk) - ``` - -!!! note - Once the powerflow equations solved in a `update!` call, the solution ``x_k`` is stored implicitly in `nlp.buffer`. These values will be used as a starting point for the next resolution of powerflow equations. - -## Passing the problem to an optimization solver with MathOptInterface - -`ExaPF.jl` provides a utility to pass the non-linear structure -specified by a [`ExaPF.AbstractNLPEvaluator`](@ref) to a `MathOptInterface` (MOI) -optimization problem. That allows to solve the corresponding -optimal power flow problem using any non-linear optimization solver compatible -with MOI. - -For instance, we can solve the reduced problem specified -in `nlp` with Ipopt. 
In a few lines of code: - -```julia -using Ipopt -optimizer = Ipopt.Optimizer() -MOI.set(optimizer, MOI.RawParameter("print_level"), 5) -MOI.set(optimizer, MOI.RawParameter("limited_memory_max_history"), 50) -MOI.set(optimizer, MOI.RawParameter("hessian_approximation"), "limited-memory") -solution = ExaPF.optimize!(optimizer, nlp) -MOI.empty!(optimizer) - -``` diff --git a/scripts/dommel.jl b/scripts/dommel.jl deleted file mode 100644 index 3487e5e4..00000000 --- a/scripts/dommel.jl +++ /dev/null @@ -1,228 +0,0 @@ - -using ExaPF -using FiniteDiff -using ForwardDiff -using LinearAlgebra -using Printf -using KernelAbstractions -using Statistics - -reldiff(a, b) = abs(a - b) / max(1, a) -function active_set(x, x♭, x♯; tol=1e-8) - are_min = findall(x .<= x♭ .+ tol) - are_max = findall(x .>= x♯ .- tol) - return vcat(are_min, are_max) -end - -function ls(algo, nlp, uk::Vector{Float64}, obj, grad::Vector{Float64}) - nᵤ = length(grad) - s = copy(-grad) - function Lalpha(alpha) - u_ = uk .+ alpha.*s - ExaPF.update!(nlp, u_) - return ExaPF.objective(nlp, u_) - end - function grad_Lalpha(alpha) - g_ = zeros(nᵤ) - u_ = uk .+ alpha .* s - ExaPF.update!(nlp, u_) - ExaPF.gradient!(nlp, g_, u_) - return dot(g_, s) - end - function Lgrad_Lalpha(alpha) - g_ = zeros(nᵤ) - u_ = uk .+ alpha .* s - ExaPF.update!(nlp, u_) - ExaPF.gradient!(nlp, g_, u_) - phi = ExaPF.objective(nlp, u_) - dphi = dot(g_, s) - return (phi, dphi) - end - dL_0 = dot(s, grad) - alpha, obj = algo(Lalpha, grad_Lalpha, Lgrad_Lalpha, 0.0002, obj, dL_0) - return alpha -end - -# sample along descent line and find minimum. -function sample_ls(nlp, uk, d, alpha_m; sample_max=30) - alpha = 0.0 - function cost_a(a) - ud = uk + a*d - ExaPF.update!(nlp, ud) - return ExaPF.objective(nlp, ud) - end - - alpha_vec = collect(range(0.1*alpha_m, stop=alpha_m, length=sample_max)) - f_vec = zeros(sample_max) - - for i=1:sample_max - a = alpha_vec[i] - f_vec[i] = cost_a(a) - end - - (val, ind) = findmin(f_vec) - - return alpha_vec[ind] -end - -# reduced gradient method -function dommel_method(datafile; bfgs=false, iter_max=200, itout_max=1, - feasible_start=false) - - # Load problem. 
- polar = PolarForm(datafile, CPU()) - - x0 = ExaPF.initial(polar, State()) - if feasible_start - prob = run_reduced_ipopt(datafile; hessian=false, feasible=true) - uk = prob.x - else - uk = ExaPF.initial(polar, Control()) - # uk = nlp.u_min - end - u0 = copy(uk) - u_start = copy(u0) - wk = copy(uk) - u_prev = copy(uk) - - buffer = ExaPF.get(polar, ExaPF.PhysicalState()) - nlp = ExaPF.ReducedSpaceEvaluator(polar; - powerflow_solver=NewtonRaphson(; tol=1e-10)) - # Init a penalty evaluator with initial penalty c₀ - c0 = 10.0 - - pen = ExaPF.AugLagEvaluator(nlp, u0; c₀=c0, scale=true) - ωtol = 1e-5 #1 / c0 - - # initialize arrays - grad = similar(uk) - # ut for line search - ut = similar(uk) - fill!(grad, 0) - grad_prev = copy(grad) - obj_prev = Inf - norm_grad = Inf - - outer_costs = Float64[] - cost_history = Float64[] - grad_history = Float64[] - - if bfgs - H = InverseLBFGSOperator(Float64, length(uk), 50, scaling=true) - α0 = 1.0 - else - H = I - α0 = 1e-6 # REF - # α0 = 1e-4 - end - αi = α0 - u♭ = nlp.u_min - u♯ = nlp.u_max - ls_itermax = 30 - β = 0.4 - τ = 1e-4 - - for i_out in 1:itout_max - iter = 1 - uk .= u_start - converged = false - # @printf("%6s %8s %4s %4s\n", "iter", "obj", "∇f", "αₗₛ") - # Inner iteration: projected gradient algorithm - n_iter = 0 - for i in 1:iter_max - n_iter += 1 - # solve power flow and compute gradients - ExaPF.update!(pen, uk) - - # evaluate cost - c = ExaPF.objective(pen, uk) - # Evaluate cost of problem without penalties - c_ref = pen.scaler.scale_obj * ExaPF.objective(pen.inner, uk) - ExaPF.gradient!(pen, grad, uk) - - # compute control step - # Armijo line-search (Bertsekas, 1976) - dk = H * grad - step = αi - for j_ls in 1:ls_itermax - step *= β - ExaPF.project!(ut, uk .- step .* dk, u♭, u♯) - ExaPF.update!(pen, ut) - ft = ExaPF.objective(pen, ut) - if ft <= c - τ * dot(dk, ut .- uk) - break - end - end - - # step = αi - wk .= uk .- step * dk - ExaPF.project!(uk, wk, u♭, u♯) - - # Stopping criteration: uₖ₊₁ - uₖ - ## Dual infeasibility - norm_grad = norm(uk .- u_prev, Inf) - ## Primal infeasibility - inf_pr = ExaPF.primal_infeasibility(pen.inner, pen.cons) - - # check convergence - if (iter%100 == 0) - @printf("%6d %.6e %.3e %.2e %.2e %.2e\n", i, c, c - c_ref, norm_grad, inf_pr, step) - end - iter += 1 - push!(grad_history, norm_grad) - push!(cost_history, c) - if bfgs - push!(H, uk .- u_prev, grad .- grad_prev) - end - grad_prev .= grad - u_prev .= uk - # Check whether we have converged nicely - if (norm_grad < ωtol - || (iter >= 4 && reldiff(c, mean(cost_history[end-2:end])) < 1e-7) - ) - converged = true - break - end - end - # Update penalty term, according to Nocedal & Wright §17.1 (p.501) - # Safeguard: update nicely the penalty if previously we failed to converge - if converged - ρ = 1e-6 - η = 10.0 - else - ρ = 1e-6 - η = 2.0 - end - # αi = max(2.0 / η * αi, 1e-8) - u_start .= ρ * u0 .+ (1 - ρ) .* uk - ωtol *= 1 / η - ωtol = max(ωtol, 1e-6) - - # Evaluate current position in the original space - cons = zeros(ExaPF.n_constraints(nlp)) - ExaPF.constraint!(nlp, cons, uk) - obj = ExaPF.objective(nlp, uk) - inf_pr = ExaPF.primal_infeasibility(nlp, cons) - push!(outer_costs, obj) - @printf("#Outer %d %-4d %.3e %.3e \n", - i_out, n_iter, obj, inf_pr) - if (norm_grad < 1e-6) && (inf_pr < 1e-8) - break - end - ExaPF.update_penalty!(pen; η=η) - end - # uncomment to plot cost evolution - # plt = lineplot(cost_history, title = "Cost history", width=80); - # println(plt) - - cons = zeros(ExaPF.n_constraints(nlp)) - ExaPF.constraint!(nlp, cons, uk) - 
ExaPF.sanity_check(nlp, uk, cons) - - return uk, cost_history -end - -datafile = joinpath(dirname(@__FILE__), "..", "data", "case57.m") - -u_opt, ch = dommel_method(datafile; bfgs=false, itout_max=10, feasible_start=false, - iter_max=1000) diff --git a/scripts/ffwu.jl b/scripts/ffwu.jl deleted file mode 100644 index 6dde3cce..00000000 --- a/scripts/ffwu.jl +++ /dev/null @@ -1,300 +0,0 @@ -# Implementation of ideas found in F.F. Wu's "Two stage" paper. -using Test -using ExaPF -using FiniteDiff -using ForwardDiff -using LinearAlgebra -using Printf -using KernelAbstractions - -import ExaPF: PowerSystem - -# This function computes the Davidon cubic interpolation as seen in: -# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.272&rep=rep1&type=pdf -# Notes: -# -# - In some cases v^2 - F0p*FMp < 0. What to do if we need to keep alpha_m?? -function davidon_ls(polar, xk, uk, p, delta_x, delta_u, alpha_m) - - F0 = ExaPF.cost_production(polar, xk, uk, p) - FM = ExaPF.cost_production(polar, xk + alpha_m*delta_x, uk + alpha_m*delta_u, p) - - function cost_a(a) - ud = uk + a*delta_u - xkk, convergence = ExaPF.powerflow(polar, xk, ud, p) - return ExaPF.cost_production(polar, xkk, ud, p) - end - - F0p = FiniteDiff.finite_difference_derivative(cost_a, 0.0) - FMp = FiniteDiff.finite_difference_derivative(cost_a, alpha_m) - - v = (3.0/alpha_m)*(F0 - FM) + F0p + FMp - w = sqrt(v^2 - F0p*FMp) - - scale = (FMp + w - v)/(FMp - F0p + 2*w) - alpha = alpha_m - scale*alpha_m - - return alpha -end - -# sample along descent line and find minimum. -function sample_ls(nlp, uk, delta_x, delta_u, alpha_m; sample_max=30) - - xk = get(nlp, State()) - alpha = 0.0 - - function cost_a(a) - ud = uk + a*delta_u - ExaPF.update!(nlp, ud) - return ExaPF.objective(nlp, ud) - end - - alpha_vec = collect(range(0, stop=alpha_m, length=sample_max)) - f_vec = zeros(sample_max) - - for i=1:sample_max - a = alpha_vec[i] - f_vec[i] = cost_a(a) - end - - (val, ind) = findmin(f_vec) - - return alpha_vec[ind] -end - -# This function computes an approximation of delta_x -# given delta_u -function deltax_approx(delta_u, dGdx, dGdu) - b = -dGdu*delta_u - delta_x = dGdx\b - return delta_x -end - -# This function determines the descent direction given gradient -# and limits on control variables. 
-function descent_direction(nlp, rk, u; damping=false, scale=2.0) - pf = nlp.model.network - u_min, u_max = nlp.u_min, nlp.u_max - - dim = length(u) - delta_u = zeros(dim) - nref = length(pf.ref) - npv = length(pf.pv) - npq = length(pf.pq) - - for i=1:dim - if u[i] < u_max[i] && u[i] > u_min[i] - delta_u[i] = -rk[i] - elseif isapprox(u[i], u_max[i]) && rk[i] > 0.0 - delta_u[i] = -rk[i] - elseif isapprox(u[i], u_min[i]) && rk[i] < 0.0 - delta_u[i] = -rk[i] - end - end - - # u = [VMAG^{REF}, P^{PV}, VMAG^{PV}] - - # scale ratio - for i=1:npv - delta_u[nref + i] = scale*delta_u[nref + i] - end - - # damping factor - if damping - for i=1:npv - idx_u = nref + npv + i - if u[idx_u] < u_max[idx_u] && u[idx_u] > u_min[idx_u] - if rk[idx_u] < 0.0 - damp = min((u_max[idx_u] - u[idx_u]), 1.0) - delta_u[idx_u] = damp*delta_u[idx_u] - elseif rk[idx_u] > 0.0 - damp = min((u[idx_u] - u_min[idx_u]), 1.0) - delta_u[idx_u] = damp*delta_u[idx_u] - end - end - end - end - - return delta_u -end - -# This function determines if the algorithm has converged -function check_convergence(rk, u, u_min, u_max; eps=1e-4) - dim = length(rk) - - for i=1:dim - if u[i] < u_max[i] && u[i] > u_min[i] && abs(rk[i]) > eps - return false - elseif isapprox(u[i], u_max[i]) && rk[i] > eps - return false - elseif isapprox(u[i], u_min[i]) && rk[i] < eps - return false - end - end - - return true -end - -# obtain the maximum alpha along the descent line that satisfies all the -# constraints. -function alpha_max(nlp, delta_x, uk, delta_u) - x_min, x_max = bounds(nlp.model, State()) - u_min, u_max = nlp.u_max, nlp.u_min - xk = ExaPF.get(nlp, State()) - - x_dim = length(delta_x) - u_dim = length(delta_u) - - alpha_x = 1e10 - alpha_u = 1e10 - - for i=1:u_dim - if abs(delta_u[i]) > 0.0 - am = (u_max[i] - uk[i])/delta_u[i] - al = (u_min[i] - uk[i])/delta_u[i] - #@printf("i: %d. delta_u: %f. uk= %f, u_min=%f, u_max=%f, am=%f, al=%f\n", - # i, delta_u[i], uk[i], u_min[i], u_max[i], am, al) - # alpha needs to be positive - a_prop = max(am, al) - # need to find alpha that satisfies all constraints - alpha_u = min(alpha_u, a_prop) - end - end - - for i=1:x_dim - if abs(delta_x[i]) > 0.0 && (x_max[i] > x_min[i]) - am = (x_max[i] - xk[i])/delta_x[i] - al = (x_min[i] - xk[i])/delta_x[i] - # alpha needs to be positive - a_prop = max(am, al) - # need to find alpha that satisfies all constraints - alpha_x = min(alpha_x, a_prop) - end - end - - # in some cases there seems to be no feasible step with the computed - # delta_x. maybe this is a problem with using linearized approximation. - if alpha_x < 0.0 - return alpha_u - end - - # TODO: we will only calculate alpha_max for u for now. - # Need to take a closer look to delta_x approximation. - return alpha_u -end - -# given limit alpha, compute costs along a direction. 
-function cost_direction(nlp, u, delta_u, alpha_max, alpha_dav; points=10) - - alphas = zeros(points) - costs = zeros(points) - k = 1 - - for a=range(0.0, stop=alpha_max, length=points) - u_prop = u + a*delta_u - ExaPF.update!(nlp, u_prop) - c = ExaPF.objective(nlp, u_prop) - - alphas[k] = a - costs[k] = c - k += 1 - end - - #UNCOMMENT THESE LINES - # plt = lineplot(alphas, costs, title = "Cost along alpha", width=80); - # alpha_dav_vert = alpha_dav*ones(points) - # scatterplot!(plt, alpha_dav_vert, costs) - # println(plt) - -end - -function run_ffwu(datafile; bfgs=false) - polar = PolarForm(datafile, CPU()) - - xk = ExaPF.initial(polar, State()) - uk = ExaPF.initial(polar, Control()) - - buffer = ExaPF.get(polar, ExaPF.PhysicalState()) - nlp = @time ExaPF.ReducedSpaceEvaluator(polar) - - # reduced gradient method - iterations = 0 - iter_max = 30 - step = 0.0001 - converged = false - norm_tol = 1e-5 - - # initialize arrays - grad = similar(uk) - fill!(grad, 0) - grad_prev = copy(grad) - - cost_history = zeros(iter_max) - grad_history = zeros(iter_max) - - if bfgs - H = InverseLBFGSOperator(Float64, length(uk), 50, scaling=true) - end - - iter = 1 - @printf("%6s %12s %6s %6s\n", "iter", "objective", "αₘₐₓ", "αₗₛ") - while !converged && iter <= iter_max - # solve power flow and compute gradients - ExaPF.update!(nlp, uk) - - # evaluate cost - c = ExaPF.objective(nlp, uk) - cost_history[iter] = c - - # check convergence - converged = check_convergence(grad_prev, uk, nlp.u_min, nlp.u_max) - - # compute descent direction - if bfgs - delta_u = -H * grad_prev - else - ExaPF.gradient!(nlp, grad, uk) - delta_u = descent_direction(nlp, grad, uk, damping=false, scale=2.0) - end - - # compute gradients of G - dGdx = nlp.state_jacobian.x.J - dGdu = nlp.state_jacobian.u.J - - # Line search - # 1st - compute approximate delta_x - delta_x = deltax_approx(delta_u, dGdx, dGdu) - # 2nd - compute a_m such that x + a_m*delta_x, u + a_m*delta_u - # does not violate constraints - a_m = alpha_max(nlp, delta_x, uk, delta_u) - - # 3rd - fit cubic and find its minimum. - #a_dav = davidon_ls(pf, nlp.x, uk, p, delta_x, delta_u, a_m) - a_ls = sample_ls(nlp, uk, delta_x, delta_u, a_m, sample_max=20) - @printf("%6d %.8e %.6e %.6e %.4e\n", iter, c, a_m, a_ls, norm(delta_u)) - - # Uncomment function below to plot the cost function along the descent direction - # and the calculated alpha. - # cost_direction(nlp, uk, delta_u, a_m, a_ls; points=100) - - # compute control step - uk .= uk .+ a_ls*delta_u - grad_history[iter] = norm(grad) - - # compute gradient - if bfgs - ExaPF.gradient!(nlp, grad, uk) - bfgs && push!(H, a_ls * delta_u, grad - grad_prev) - grad_prev .= grad - end - iter += 1 - end - # uncomment to plot cost evolution - # plt = lineplot(cost_history[1:iter - 1], title = "Cost history", width=80); - # plt = lineplot(log10.(grad_history[1:iter - 1]), title = "Cost history", width=80); - # println(plt) - - return -end -datafile = joinpath(dirname(@__FILE__), "..", "data", "case9.m") -run_ffwu(datafile; bfgs=false) diff --git a/src/Evaluators/Evaluators.jl b/src/Evaluators/Evaluators.jl deleted file mode 100644 index 396ab7e4..00000000 --- a/src/Evaluators/Evaluators.jl +++ /dev/null @@ -1,255 +0,0 @@ - -""" - AbstractNLPEvaluator - -AbstractNLPEvaluator implements the bridge between the -problem formulation (see [`AbstractFormulation`](@ref)) and the optimization -solver. 
Once the problem formulation bridged, the evaluator allows -to evaluate: -- the objective; -- the gradient of the objective; -- the constraints; -- the Jacobian of the constraints; -- the Jacobian-vector and transpose-Jacobian vector products of the constraints; -- the Hessian of the objective; -- the Hessian of the Lagrangian. - -""" -abstract type AbstractNLPEvaluator end - -abstract type AbstractNLPAttribute end - -""" - Variables <: AbstractNLPAttribute end - -Attribute corresponding to the optimization variables attached -to a given [`AbstractNLPEvaluator`](@ref). -""" -struct Variables <: AbstractNLPAttribute end - -""" - Constraints <: AbstractNLPAttribute end - -Attribute corresponding to the constraints attached -to a given [`AbstractNLPEvaluator`](@ref). -""" -struct Constraints <: AbstractNLPAttribute end - -""" - n_variables(nlp::AbstractNLPEvaluator) -Get the number of variables in the problem. -""" -function n_variables end - -""" - n_constraints(nlp::AbstractNLPEvaluator) -Get the number of constraints in the problem. -""" -function n_constraints end - -# Callbacks -""" - objective(nlp::AbstractNLPEvaluator, u)::Float64 - -Evaluate the objective at given variable `u`. -""" -function objective end - -""" - gradient!(nlp::AbstractNLPEvaluator, g, u) - -Evaluate the gradient of the objective, at given variable `u`. -Store the result inplace in the vector `g`. - -## Note -The vector `g` should have the same dimension as `u`. - -""" -function gradient! end - -""" - constraint!(nlp::AbstractNLPEvaluator, cons, u) - -Evaluate the constraints of the problem at given variable `u`. Store -the result inplace, in the vector `cons`. - -## Note -The vector `cons` should have the same dimension as the result -returned by `n_constraints(nlp)`. - -""" -function constraint! end - -""" - jacobian_structure!(nlp::AbstractNLPEvaluator, rows, cols) - -Return the sparsity pattern of the Jacobian matrix. Stores -the results inplace, in the vectors `rows` and `cols` (whose -dimension should match the number of non-zero in the Jacobian matrix). - -""" -function jacobian_structure! end - -""" - jacobian!(nlp::AbstractNLPEvaluator, jac, u) - -Evaluate the Jacobian of the constraints, at variable `u`. -Store the result inplace, in the `m x n` dense matrix `jac`. - -""" -function jacobian! end - -""" - jprod!(nlp::AbstractNLPEvaluator, jv, u, v) - -Evaluate the Jacobian-vector product ``J v`` of the constraints. -The vector `jv` is modified inplace. - -Let `(n, m) = n_variables(nlp), n_constraints(nlp)`. - -* `u` is a vector with dimension `n` -* `v` is a vector with dimension `n` -* `jv` is a vector with dimension `m` - -""" -function jprod! end - -""" - jtprod!(nlp::AbstractNLPEvaluator, jv, u, v) - -Evaluate the transpose Jacobian-vector product ``J^{T} v`` of the constraints. -The vector `jv` is modified inplace. - -Let `(n, m) = n_variables(nlp), n_constraints(nlp)`. - -* `u` is a vector with dimension `n` -* `v` is a vector with dimension `m` -* `jv` is a vector with dimension `n` - -""" -function jtprod! end - -""" - ojtprod!(nlp::AbstractNLPEvaluator, jv, u, σ, v) - -Evaluate the transpose Jacobian-vector product `J' * [σ ; v]`, -with `J` the Jacobian of the vector `[f(x); h(x)]`. -`f(x)` is the current objective and `h(x)` constraints. -The vector `jv` is modified inplace. - -Let `(n, m) = n_variables(nlp), n_constraints(nlp)`. - -* `jv` is a vector with dimension `n` -* `u` is a vector with dimension `n` -* `σ` is a scalar -* `v` is a vector with dimension `m` - -""" -function ojtprod! 
end - -""" - hessian!(nlp::AbstractNLPEvaluator, H, u) - -Evaluate the Hessian `∇²f(u)` of the objective function `f(u)`. -Store the result inplace, in the `n x n` dense matrix `H`. - -""" -function hessian! end - -""" - hessprod!(nlp::AbstractNLPEvaluator, hessvec, u, v) - -Evaluate the Hessian-vector product `∇²f(u) * v` of the objective -evaluated at variable `u`. -Store the result inplace, in the vector `hessvec`. - -## Note -The vector `hessprod` should have the same length as `u`. - -""" -function hessprod! end - -@doc raw""" - hessian_lagrangian_penalty_prod!(nlp::AbstractNLPEvaluator, hessvec, u, y, σ, d, v) - -Evaluate the Hessian-vector product of the Lagrangian -function ``L(u, y) = f(u) + \sum_i y_i c_i(u) + \frac{1}{2} d_i c_i(u)^2`` with a vector `v`: -```math -∇²L(u, y) ⋅ v = σ ∇²f(u) ⋅ v + \sum_i (y_i + d_i) ∇²c_i(u) ⋅ v + \sum_i d_i ∇c_i(u)^T ∇c_i(u) -``` - -Store the result inplace, in the vector `hessvec`. - -### Arguments - -* `hessvec` is a `AbstractVector` with dimension `n`, which is modified inplace. -* `u` is a `AbstractVector` with dimension `n`, storing the current variable. -* `y` is a `AbstractVector` with dimension `n`, storing the current constraints' multipliers -* `σ` is a scalar -* `v` is a vector with dimension `n`. -* `d` is a vector with dimension `m`. -""" -function hessian_lagrangian_penalty_prod! end - -# Utilities -""" - primal_infeasibility(nlp::AbstractNLPEvaluator, u) - -Return primal infeasibility associated to current model `nlp` evaluated -at variable `u`. - -""" -function primal_infeasibility end - -""" - primal_infeasibility!(nlp::AbstractNLPEvaluator, cons, u) - -Return primal infeasibility associated to current model `nlp` evaluated -at variable `u`. Modify vector `cons` inplace. - -""" -function primal_infeasibility! end - -"Return `true` if the problem is constrained, `false` otherwise." -is_constrained(nlp::AbstractNLPEvaluator) = n_constraints(nlp) > 0 - -""" - constraints_type(nlp::AbstractNLPEvaluator) - -Return the type of the non-linear constraints of the evaluator `nlp`, -as a `Symbol`. Result could be `:inequality` if problem has only -inequality constraints, `:equality` if problem has only equality -constraints, or `:mixed` if problem has both types of constraints. -""" -function constraints_type end - -"Check if Hessian of objective is implemented." -has_hessian(nlp::AbstractNLPEvaluator) = false -"Check if Hessian of Lagrangian is implemented." -has_hessian_lagrangian(nlp::AbstractNLPEvaluator) = false - -""" - reset!(nlp::AbstractNLPEvaluator) - -Reset evaluator `nlp` to default configuration. - -""" -function reset! end - -include("common.jl") -include("bridge_evaluator.jl") - -# Basic evaluators -include("reduced_evaluator.jl") -include("slack_evaluator.jl") -include("feasibility_evaluator.jl") -include("proxal_evaluators.jl") - -# Penalty evaluators -include("penalty.jl") -include("auglag.jl") - -# Bridge with MOI -include("MOI_wrapper.jl") -include("optimizers.jl") - diff --git a/src/Evaluators/MOI_wrapper.jl b/src/Evaluators/MOI_wrapper.jl deleted file mode 100644 index d47d69aa..00000000 --- a/src/Evaluators/MOI_wrapper.jl +++ /dev/null @@ -1,106 +0,0 @@ -# MOI wrapper - -""" - MOIEvaluator <: MOI.AbstractNLPEvaluator - -Bridge from a [`ExaPF.AbstractNLPEvaluator`](@ref) to a `MOI.AbstractNLPEvaluator`. - -## Attributes - -* `nlp::AbstractNLPEvaluator`: the underlying `ExaPF` problem. 
-* `hash_x::UInt`: hash of the last evaluated variable `x` -* `has_hess::Bool` (default: `false`): if `true`, pass a Hessian structure to MOI. - -""" -mutable struct MOIEvaluator{Evaluator<:AbstractNLPEvaluator,Hess} <: MOI.AbstractNLPEvaluator - nlp::Evaluator - hash_x::UInt - has_hess::Bool - hess_buffer::Hess -end -# MOI needs Hessian of Lagrangian function -function MOIEvaluator(nlp) - hess = nothing - if has_hessian_lagrangian(nlp) - n = n_variables(nlp) - hess = zeros(n, n) - end - return MOIEvaluator(nlp, UInt64(0), has_hessian_lagrangian(nlp), hess) -end - -function _update!(ev::MOIEvaluator, x) - hx = hash(x) - if hx != ev.hash_x - update!(ev.nlp, x) - ev.hash_x = hx - end -end - -MOI.features_available(ev::MOIEvaluator) = ev.has_hess ? [:Grad, :Hess] : [:Grad] -MOI.initialize(ev::MOIEvaluator, features) = nothing - -function MOI.jacobian_structure(ev::MOIEvaluator) - rows, cols = jacobian_structure(ev.nlp) - return Tuple{Int, Int}[(r, c) for (r, c) in zip(rows, cols)] -end - -function MOI.hessian_lagrangian_structure(ev::MOIEvaluator) - n = n_variables(ev.nlp) - rows, cols = hessian_structure(ev.nlp) - return Tuple{Int, Int}[(r, c) for (r, c) in zip(rows, cols)] -end - -function MOI.eval_objective(ev::MOIEvaluator, x) - _update!(ev, x) - obj = objective(ev.nlp, x) - return obj -end - -function MOI.eval_objective_gradient(ev::MOIEvaluator, g, x) - _update!(ev, x) - gradient!(ev.nlp, g, x) -end - -function MOI.eval_constraint(ev::MOIEvaluator, cons, x) - _update!(ev, x) - constraint!(ev.nlp, cons, x) -end - -function MOI.eval_constraint_jacobian(ev::MOIEvaluator, jac, x) - _update!(ev, x) - n = length(x) - m = n_constraints(ev.nlp) - fill!(jac, 0) - J = reshape(jac, m, n) - jacobian!(ev.nlp, J, x) -end - -function MOI.eval_hessian_lagrangian(ev::MOIEvaluator, hess, x, σ, μ) - _update!(ev, x) - n = n_variables(ev.nlp) - # Evaluate full reduced Hessian in the preallocated buffer. - H = ev.hess_buffer - hessian!(ev.nlp, H, x) - # Only dense Hessian supported now - index = 1 - @inbounds for i in 1:n, j in 1:i - # Hessian is symmetric, and MOI considers only the lower - # triangular part. We average the values from the lower - # and upper triangles for stability. - hess[index] = 0.5 * σ * (H[i, j] + H[j, i]) - index += 1 - end -end - -function MOI.eval_hessian_lagrangian_product(ev::MOIEvaluator, hv, x, v, σ, μ) - _update!(ev, x) - hessprod!(ev.nlp, hv, x, v) - hv .*= σ -end - -function MOI.NLPBlockData(nlp::AbstractNLPEvaluator) - lb, ub = bounds(nlp, Constraints()) - ev = MOIEvaluator(nlp) - return MOI.NLPBlockData(MOI.NLPBoundsPair.(lb, ub), ev, true) -end - diff --git a/src/Evaluators/auglag.jl b/src/Evaluators/auglag.jl deleted file mode 100644 index 0538527c..00000000 --- a/src/Evaluators/auglag.jl +++ /dev/null @@ -1,201 +0,0 @@ -# Code for augmented Lagrangian evaluator. Inspired by the excellent NLPModels.jl: -# Ref: https://github.com/JuliaSmoothOptimizers/Percival.jl/blob/master/src/AugLagModel.jl -# Two-sided Lagrangian - -const CONSTRAINTS_TYPE = Union{Val{:inequality}, Val{:equality}, Val{:mixed}} - -@doc raw""" - AugLagEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT} <: AbstractPenaltyEvaluator - -Augmented-Lagrangian evaluator. 
- -### Description - -Takes as input any [`AbstractNLPEvaluator`](@ref) encoding a non-linear problem -```math -\begin{aligned} - \min_u \quad & f(u)\\ -\mathrm{s.t.} \quad & h^♭ ≤ h(u) ≤ h^♯,\\ - & u^♭ ≤ u ≤ u^♯, -\end{aligned} -``` -and return a new evaluator reformulating the original problem -by moving the $m$ constraints $h^♭ ≤ h(u) ≤ h^♯$ into the objective -using a set of penalties $ϕ_1, ⋯, ϕ_m$ and multiplier estimates -$λ_1, ⋯, λ_m$: -```math -\begin{aligned} - \min_u \quad & f(u) + \sum_{i=1}^m ϕ_i(h_i, λ_i) \\ -\mathrm{s.t.} \quad & u^♭ ≤ u ≤ u^♯, -\end{aligned} -``` - -This evaluator considers explicitly the inequality constraints, -without reformulating them by introducing slack variables. Each -penalty $ϕ_i$ is defined as -```math -ϕ_i(h_i, λ_i) = λ_i^⊤ φ_i(h_i) + \frac \rho2 \| φ_i(h_i) \|_2^2 -``` -with $φ_i$ a function to compute the current infeasibility -```math -φ_i(h_i, λ_i) = \max\{0 , λ_i + ρ (h_i - h_i^♯) \} + \min\{0 , λ_i + ρ (h_i - h_i^♭) \} -``` - -### Attributes - -* `inner::Evaluator`: original problem. -* `cons_type`: type of the constraints of the original problem (equalities or inequalities). -* `cons::VT`: a buffer storing the current evaluation of the constraints for the inner evaluator. -* `rho::T`: current penalty. -* `λ::VT`: current multiplier. -* `scaler::MaxScaler{T,VT}`: a scaler to rescale the range of the constraints in the original problem. - -""" -mutable struct AugLagEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT} <: AbstractPenaltyEvaluator - inner::Evaluator - # Type - cons_type::CONSTRAINTS_TYPE - cons::VT - ρ::T - λ::VT - λc::VT - # Scaling - scaler::MaxScaler{T, VT} - # Stats - counter::NLPCounter -end -function AugLagEvaluator( - nlp::AbstractNLPEvaluator, u0; - scale=false, c₀=0.1, -) - if !is_constrained(nlp) - error("Model specified in `nlp` is unconstrained. Instead of using" * - " an Augmented Lagrangian algorithm, you could use any "* - "bound constrained solver instead.") - end - cons_type = Val(constraints_type(nlp)) - if !isa(cons_type, CONSTRAINTS_TYPE) - error("Constraints $(constraints_type(nlp)) is not supported by" * - " AugLagEvaluator.") - end - - g_min, g_max = bounds(nlp, Constraints()) - cx = similar(g_min) ; fill!(cx, 0) - λc = similar(g_min) ; fill!(λc, 0) - λ = similar(g_min) ; fill!(λ, 0) - - scaler = scale ? MaxScaler(nlp, u0) : MaxScaler(g_min, g_max) - return AugLagEvaluator(nlp, cons_type, cx, c₀, λ, λc, scaler, NLPCounter()) -end -function AugLagEvaluator( - datafile::String; device=CPU(), scale=false, c₀=0.1, options... -) - nlp = ReducedSpaceEvaluator(datafile; device=device, options...) 
- u0 = initial(nlp) - return AugLagEvaluator(nlp, u0; scale=scale, c₀=c₀) -end - -has_hessian(nlp::AugLagEvaluator) = has_hessian(nlp.inner) -backend(nlp::AugLagEvaluator) = backend(nlp.inner) - -# Default fallback -function _update_internal!(ag::AugLagEvaluator, ::CONSTRAINTS_TYPE) - # Update (shifted) infeasibility error - g♭ = ag.scaler.g_min - g♯ = ag.scaler.g_max - ag.λc .= max.(0, ag.λ .+ ag.ρ .* (ag.cons .- g♯)) .+ - min.(0, ag.λ .+ ag.ρ .* (ag.cons .- g♭)) - ag.cons .= max.(0, ag.cons .- g♯) .+ min.(0, ag.cons .- g♭) -end - -# Specialization for equality constraints -function _update_internal!(ag::AugLagEvaluator, ::Val{:equality}) - ag.λc .= ag.λ .+ ag.ρ .* ag.cons -end - -function update!(ag::AugLagEvaluator, u) - conv = update!(ag.inner, u) - # Update constraints - constraint!(ag.inner, ag.cons, u) - # Rescale - ag.cons .*= ag.scaler.scale_cons - _update_internal!(ag, ag.cons_type) - return conv -end - -function update_penalty!(ag::AugLagEvaluator; η=10.0) - ag.ρ = min(η * ag.ρ, 10e12) -end - -function update_multipliers!(ag::AugLagEvaluator) - ag.λ .= ag.λc - return -end - -function objective(ag::AugLagEvaluator, u) - ag.counter.objective += 1 - base_nlp = ag.inner - cx = ag.cons - obj = ag.scaler.scale_obj * objective(base_nlp, u) + - 0.5 * ag.ρ * dot(cx, cx) + dot(ag.λ, cx) - return obj -end - -function inner_objective(ag::AugLagEvaluator, u) - return ag.scaler.scale_obj * objective(ag.inner, u) -end - -function gradient!(ag::AugLagEvaluator, grad, u) - ag.counter.gradient += 1 - base_nlp = ag.inner - scaler = ag.scaler - σ = scaler.scale_obj - mask = abs.(ag.cons) .> 0 - v = scaler.scale_cons .* ag.λc .* mask - ojtprod!(base_nlp, grad, u, σ, v) - return -end - -function hessprod!(ag::AugLagEvaluator, hessvec, u, w) - ag.counter.hprod += 1 - scaler = ag.scaler - cx = ag.cons - mask = abs.(cx) .> 0 - - σ = scaler.scale_obj - y = (scaler.scale_cons .* ag.λc .* mask) - ρ = ag.ρ .* (scaler.scale_cons .* scaler.scale_cons .* mask) - - hessian_lagrangian_penalty_prod!(ag.inner, hessvec, u, y, σ, ρ, w)::Nothing - return -end - -function hessian!(ag::AugLagEvaluator, H, u) - ag.counter.hessian += 1 - scaler = ag.scaler - cx = ag.cons - mask = abs.(cx) .> 0 - - σ = scaler.scale_obj - y = (scaler.scale_cons .* ag.λc .* mask) - ρ = ag.ρ .* (scaler.scale_cons .* scaler.scale_cons .* mask) - hessian_lagrangian_penalty!(ag.inner, H, u, y, σ, ρ) - return -end - -function estimate_multipliers(ag::AugLagEvaluator, u) - J = Diagonal(ag.scaler.scale_cons) * jacobian(ag.inner, u) - ∇f = gradient(ag.inner, u) - ∇f .*= ag.scaler.scale_obj - λ = - (J * J') \ (J * ∇f) - return λ -end - -function reset!(ag::AugLagEvaluator) - reset!(ag.inner) - empty!(ag.counter) - fill!(ag.cons, 0) - fill!(ag.λ, 0) - fill!(ag.λc, 0) -end - diff --git a/src/Evaluators/bridge_evaluator.jl b/src/Evaluators/bridge_evaluator.jl deleted file mode 100644 index 7350d458..00000000 --- a/src/Evaluators/bridge_evaluator.jl +++ /dev/null @@ -1,151 +0,0 @@ - -#= - BridgeEvaluator -=# -struct BridgeDeviceArrays{VT, MT} - u::VT - g::VT - cons::VT - v::VT - y::VT - w::VT - jv::VT - J::MT - H::MT -end - -function BridgeDeviceArrays(n::Int, m::Int, VT, MT) - BridgeDeviceArrays{VT, MT}( - VT(undef, n), - VT(undef, n), - VT(undef, m), - VT(undef, m), - VT(undef, m), - VT(undef, m), - VT(undef, n), - MT(undef, m, n), - MT(undef, n, n), - ) -end - -struct BridgeDeviceEvaluator{Evaluator, VT, MT, DVT, DMT} <: AbstractNLPEvaluator - inner::Evaluator - bridge::BridgeDeviceArrays{DVT, DMT} -end -function 
BridgeDeviceEvaluator(nlp::AbstractNLPEvaluator, device) - if isa(nlp, BridgeDeviceEvaluator) - error("BridgeDeviceEvaluator cannot wrap another BridgeDeviceEvaluator.") - end - n, m = n_variables(nlp), n_constraints(nlp) - # Deporting device - VT = Array{Float64, 1} - MT = Array{Float64, 2} - model = backend(nlp) - if isa(model.device, CPU) - VTD = Array{Float64, 1} - MTD = Array{Float64, 2} - else - VTD = CUDA.CuArray{Float64, 1} - MTD = CUDA.CuArray{Float64, 2} - end - bridge = BridgeDeviceArrays(n, m, VTD, MTD) - return BridgeDeviceEvaluator{typeof(nlp), VT, MT, VTD, MTD}(nlp, bridge) -end -function BridgeDeviceEvaluator(case::String; device=KA.CPU()) - nlp = ReducedSpaceEvaluator(case) - return BridgeDeviceEvaluator(nlp, device) -end - -n_variables(nlp::BridgeDeviceEvaluator) = n_variables(nlp.inner) -n_constraints(nlp::BridgeDeviceEvaluator) = n_constraints(nlp.inner) -constraints_type(nlp::BridgeDeviceEvaluator) = constraints_type(nlp.inner) -has_hessian(nlp::BridgeDeviceEvaluator) = has_hessian(nlp.inner) -reset!(nlp::BridgeDeviceEvaluator) = reset!(nlp.inner) - -# Getters -get(nlp::BridgeDeviceEvaluator, attr::AbstractNLPAttribute) = get(nlp.inner, attr) -get(nlp::BridgeDeviceEvaluator, attr::AbstractVariable) = get(nlp.inner, attr) -get(nlp::BridgeDeviceEvaluator, attr::PS.AbstractNetworkAttribute) = get(nlp.inner, attr) - -function setvalues!(nlp::BridgeDeviceEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(nlp.inner, attr, values) -end - -function bounds(nlp::BridgeDeviceEvaluator{Ev, VT, MT, DVT, DMT}, attr::AbstractNLPAttribute) where {Ev, VT, MT, DVT, DMT} - b♭, b♯ = bounds(nlp.inner, attr) - return b♭ |> VT, b♯ |> VT -end - -function initial(nlp::BridgeDeviceEvaluator{Ev, VT, MT, DVT, DMT}) where {Ev, VT, MT, DVT, DMT} - return initial(nlp.inner) |> VT -end - -function update!(nlp::BridgeDeviceEvaluator, u) - copyto!(nlp.bridge.u, u) - return update!(nlp.inner, nlp.bridge.u) -end - -objective(nlp::BridgeDeviceEvaluator, u) = objective(nlp.inner, nlp.bridge.u) - -function constraint!(nlp::BridgeDeviceEvaluator, cons, u) - constraint!(nlp.inner, nlp.bridge.cons, nlp.bridge.u) - copyto!(cons, nlp.bridge.cons) - return -end - -function gradient!(nlp::BridgeDeviceEvaluator, grad, u) - gradient!(nlp.inner, nlp.bridge.g, nlp.bridge.u) - copyto!(grad, nlp.bridge.g) - return -end - -function jtprod!(nlp::BridgeDeviceEvaluator, jv, u, v) - copyto!(nlp.bridge.v, v) - jtprod!(nlp.inner, nlp.bridge.jv, nlp.bridge.u, nlp.bridge.v) - copyto!(jv, nlp.bridge.jv) - return -end - -function jacobian!(nlp::BridgeDeviceEvaluator, jac, w) - jacobian!(nlp.inner, nlp.bridge.J, nlp.bridge.u) - copyto!(jac, nlp.bridge.J) - return -end - -function ojtprod!(nlp::BridgeDeviceEvaluator, jv, u, σ, v) - copyto!(nlp.bridge.v, v) - ojtprod!(nlp.inner, nlp.bridge.jv, nlp.bridge.u, σ, nlp.bridge.v) - copyto!(jv, nlp.bridge.jv) - return -end - -function hessprod!(nlp::BridgeDeviceEvaluator, hv, u, v) - copyto!(nlp.bridge.g, v) - hessprod!(nlp.inner, nlp.bridge.jv, nlp.bridge.u, nlp.bridge.g) - copyto!(hv, nlp.bridge.jv) - return -end - -function hessian!(nlp::BridgeDeviceEvaluator, H, u) - hessian!(nlp.inner, nlp.bridge.H, nlp.bridge.u) - copyto!(H, nlp.bridge.H) - return -end - -function hessian_lagrangian_penalty_prod!(nlp::BridgeDeviceEvaluator, hv, u, y, σ, w, v) - copyto!(nlp.bridge.g, v) - copyto!(nlp.bridge.w, w) - copyto!(nlp.bridge.y, y) - hessian_lagrangian_penalty_prod!(nlp.inner, nlp.bridge.jv, nlp.bridge.u, nlp.bridge.y, σ, nlp.bridge.w, nlp.bridge.g) - copyto!(hv, nlp.bridge.jv) - 
return -end - -function hessian_lagrangian_penalty!(nlp::BridgeDeviceEvaluator, H, u, y, σ, w) - copyto!(nlp.bridge.w, w) - copyto!(nlp.bridge.y, y) - hessian_lagrangian_penalty!(nlp.inner, nlp.bridge.H, nlp.bridge.u, nlp.bridge.y, σ, nlp.bridge.w) - copyto!(H, nlp.bridge.H) - return -end - diff --git a/src/Evaluators/common.jl b/src/Evaluators/common.jl deleted file mode 100644 index e4fc4d70..00000000 --- a/src/Evaluators/common.jl +++ /dev/null @@ -1,163 +0,0 @@ -# Common interface for AbstractNLPEvaluator -# -function Base.show(io::IO, nlp::AbstractNLPEvaluator) - n = n_variables(nlp) - m = n_constraints(nlp) - println(io, "A Evaluator object") - println(io, " * #vars: ", n) - println(io, " * #cons: ", m) -end - -## Generic callbacks -function constraint(nlp::AbstractNLPEvaluator, x) - cons = similar(x, n_constraints(nlp)) ; fill!(cons, 0) - constraint!(nlp, cons, x) - return cons -end - -function gradient(nlp::AbstractNLPEvaluator, x) - ∇f = similar(x) ; fill!(∇f, 0) - gradient!(nlp, ∇f, x) - return ∇f -end - -function jacobian(nlp::AbstractNLPEvaluator, x) - n = n_variables(nlp) - m = n_constraints(nlp) - J = similar(x, m, n) ; fill!(J, 0) - jacobian!(nlp, J, x) - return J -end - -# Default implementation of jprod!, using full Jacobian matrix -function jprod!(nlp::AbstractNLPEvaluator, jv, u, v) - nᵤ = length(u) - m = n_constraints(nlp) - @assert nᵤ == length(v) - jac = jacobian(nlp, u) - mul!(jv, jac, v) - return -end - -# Joint Objective Jacobian transpose vector product (default implementation) -function ojtprod!(nlp::AbstractNLPEvaluator, jv, u, σ, v) - gradient!(nlp, jv, u) - jv .*= σ # scale gradient - jtprod!(nlp, jv, u, v) - return -end - -# Generate Hessian using Hessian-vector product routine -macro define_hessian(function_name, target_function, args...) - fname = Symbol(function_name) - argstup = Tuple(args) - quote - function $(esc(fname))(nlp::AbstractNLPEvaluator, hess, $(map(esc, argstup)...)) - @assert has_hessian(nlp) - n = n_variables(nlp) - v = similar(x) - @inbounds for i in 1:n - hv = @view hess[:, i] - fill!(v, 0) - v[i:i] .= 1.0 - $target_function(nlp, hv, $(map(esc, argstup)...), v) - end - end - end -end - -@define_hessian hessian! hessprod! x -@define_hessian hessian_lagrangian_penalty! hessian_lagrangian_penalty_prod! 
x y σ D - -function hessian(nlp::AbstractNLPEvaluator, x) - n = n_variables(nlp) - H = similar(x, n, n) ; fill!(H, 0) - hessian!(nlp, H, x) - return H -end - -# Counters -abstract type AbstractCounter end - -mutable struct NLPCounter <: AbstractCounter - objective::Int - gradient::Int - hessian::Int - jacobian::Int - jtprod::Int - hprod::Int -end -NLPCounter() = NLPCounter(0, 0, 0, 0, 0, 0) - -function Base.empty!(c::NLPCounter) - for attr in fieldnames(NLPCounter) - setfield!(c, attr, 0) - end -end - -# Active set utils -function _check(val, val_min, val_max) - violated_inf = findall(val .< val_min) - violated_sup = findall(val .> val_max) - n_inf = length(violated_inf) - n_sup = length(violated_sup) - err_inf = xnorm_inf(val_min[violated_inf] .- val[violated_inf]) - err_sup = xnorm_inf(val[violated_sup] .- val_max[violated_sup]) - return (n_inf, err_inf, n_sup, err_sup) -end - -function _inf_pr(nlp::AbstractNLPEvaluator, cons) - (n_inf, err_inf, n_sup, err_sup) = _check(cons, nlp.g_min, nlp.g_max) - return max(err_inf, err_sup) -end - -#= - SCALER -=# -abstract type AbstractScaler end - -scale_factor(h, tol, η) = max(tol, η / max(1.0, h)) - -struct MaxScaler{T, VT} <: AbstractScaler - scale_obj::T - scale_cons::VT - g_min::VT - g_max::VT -end -function MaxScaler(g_min, g_max) - @assert length(g_min) == length(g_max) - m = length(g_min) - sc = similar(g_min) ; fill!(sc, 1.0) - return MaxScaler{eltype(g_min), typeof(g_min)}(1.0, sc, g_min, g_max) -end - - -function MaxScaler(nlp::AbstractNLPEvaluator, u0::AbstractVector; - η=100.0, tol=1e-8) - n = n_variables(nlp) - m = n_constraints(nlp) - conv = update!(nlp, u0) - ∇g = similar(u0) ; fill!(∇g, 0) - gradient!(nlp, ∇g, u0) - - s_obj = scale_factor(xnorm_inf(∇g), tol, η) - - VT = typeof(u0) - ∇c = xzeros(VT, n) - h_s_cons = zeros(m) - v = xzeros(VT, m) - h_v = zeros(m) - for i in eachindex(h_s_cons) - fill!(h_v, 0.0) - h_v[i] = 1.0 - copyto!(v, h_v) - jtprod!(nlp, ∇c, u0, v) - h_s_cons[i] = scale_factor(xnorm_inf(∇c), tol, η) - end - - g♭, g♯ = bounds(nlp, Constraints()) - s_cons = h_s_cons |> VT - - return MaxScaler{typeof(s_obj), typeof(s_cons)}(s_obj, s_cons, s_cons .* g♭, s_cons .* g♯) -end - diff --git a/src/Evaluators/derivatives.jl b/src/Evaluators/derivatives.jl deleted file mode 100644 index e69de29b..00000000 diff --git a/src/Evaluators/feasibility_evaluator.jl b/src/Evaluators/feasibility_evaluator.jl deleted file mode 100644 index d02abf49..00000000 --- a/src/Evaluators/feasibility_evaluator.jl +++ /dev/null @@ -1,106 +0,0 @@ - -""" - FeasibilityEvaluator{T} <: AbstractNLPEvaluator - -TODO - -""" -mutable struct FeasibilityEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT} <: AbstractNLPEvaluator - inner::Evaluator - x_min::VT - x_max::VT - cons::VT -end -function FeasibilityEvaluator(nlp::AbstractNLPEvaluator) - if !is_constrained(nlp) - error("Input problem must have inequality constraints") - end - x_min, x_max = bounds(nlp, Variables()) - cx = similar(x_min, n_constraints(nlp)) - return FeasibilityEvaluator{typeof(nlp), eltype(x_min), typeof(x_min)}(nlp, x_min, x_max, cx) -end -function FeasibilityEvaluator(datafile::String; device=CPU()) - nlp = SlackEvaluator(datafile; device=device) - return FeasibilityEvaluator(nlp) -end - -n_variables(nlp::FeasibilityEvaluator) = n_variables(nlp.inner) -n_constraints(nlp::FeasibilityEvaluator) = 0 - -constraints_type(::FeasibilityEvaluator) = :bound - -has_hessian(nlp::FeasibilityEvaluator) = has_hessian(nlp.inner) -has_hessian_lagrangian(nlp::FeasibilityEvaluator) = has_hessian(nlp) 
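# Summary of the callbacks defined below (the docstring above is still a TODO):
# `FeasibilityEvaluator` wraps a constrained inner evaluator and exposes an
# unconstrained problem whose objective is the squared infeasibility of the inner
# constraints,
#
#     f(u) = 1/2 * || c(u) ||^2 ,
#
# with `c` the stacked constraints kept in `nlp.cons` and refreshed by `update!`.
# Only the variable bounds of the inner problem remain (`n_constraints` is 0 and
# `constraints_type` is `:bound`). The gradient is J(u)' * c(u), evaluated with
# `ojtprod!` and σ = 0, and `hessprod!` applies J(u)' * J(u) * v plus the curvature
# term associated with ∇²c(u) (cf. the comment `H = ∇²c(x) + J'*J` below).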
-backend(nlp::FeasibilityEvaluator) = backend(nlp.inner) - -# Getters -get(nlp::FeasibilityEvaluator, attr::AbstractNLPAttribute) = get(nlp.inner, attr) -get(nlp::FeasibilityEvaluator, attr::AbstractVariable) = get(nlp.inner, attr) -get(nlp::FeasibilityEvaluator, attr::PS.AbstractNetworkAttribute) = get(nlp.inner, attr) - -# Setters -function setvalues!(nlp::FeasibilityEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(nlp.inner, attr, values) -end - -# Bounds -bounds(nlp::FeasibilityEvaluator, ::Variables) = bounds(nlp.inner, Variables()) -bounds(nlp::FeasibilityEvaluator, ::Constraints) = (Float64[], Float64[]) - -initial(nlp::FeasibilityEvaluator) = initial(nlp.inner) - -function update!(nlp::FeasibilityEvaluator, u) - conv = update!(nlp.inner, u) - constraint!(nlp.inner, nlp.cons, u) - return conv -end - -# f(x) = 0.5 * || c(x) ||² -function objective(nlp::FeasibilityEvaluator, u) - return 0.5 * dot(nlp.cons, nlp.cons) -end - -function constraint!(nlp::FeasibilityEvaluator, cons, u) - @assert length(cons) == 0 - return -end - -# Gradient -# ∇f = J' * c(x) -function gradient!(nlp::FeasibilityEvaluator, grad, u) - σ = 0.0 - ojtprod!(nlp.inner, grad, u, σ, nlp.cons) - return -end - -jacobian_structure(ag::FeasibilityEvaluator) = (Int[], Int[]) -function jacobian!(ag::FeasibilityEvaluator, jac, u) - @assert length(jac) == 0 - return -end - -# H = ∇²c(x) + J'*J -function hessprod!(nlp::FeasibilityEvaluator, hessvec, u, v) - σ = 0.0 - # Need to update the first-order adjoint λ first - hessian_lagrangian_penalty_prod!(nlp.inner, hessvec, u, nlp.cons, σ, 0.0, v) - # J' * J * v - jv = similar(nlp.cons) - jtv = similar(u) - jprod!(nlp.inner, jv, u, v) - jtprod!(nlp.inner, jtv, u, jv) - hessvec .+= jtv - return -end - -function hessian_structure(nlp::FeasibilityEvaluator) - n = n_variables(nlp) - rows = Int[r for r in 1:n for c in 1:r] - cols = Int[c for r in 1:n for c in 1:r] - return rows, cols -end - -function reset!(nlp::FeasibilityEvaluator) - reset!(nlp.inner) -end - diff --git a/src/Evaluators/optimizers.jl b/src/Evaluators/optimizers.jl deleted file mode 100644 index 1563b7bc..00000000 --- a/src/Evaluators/optimizers.jl +++ /dev/null @@ -1,67 +0,0 @@ - -""" - optimize!(optimizer, nlp::AbstractNLPEvaluator, x0) - -Use optimization routine implemented in `optimizer` to optimize -the optimal power flow problem specified in the evaluator `nlp`. -Initial point is specified by `x0`. - -Return the solution as a named tuple, with fields -- `status::MOI.TerminationStatus`: Solver's termination status, as specified by MOI -- `minimum::Float64`: final objective -- `minimizer::AbstractVector`: final solution vector, with same ordering as the `Variables` specified in `nlp`. - - - optimize!(optimizer, nlp::AbstractNLPEvaluator) - -Wrap previous `optimize!` function and pass as initial guess `x0` -the initial value specified when calling `initial(nlp)`. - -## Examples - -```julia -nlp = ExaPF.ReducedSpaceEvaluator(datafile) -optimizer = Ipopt.Optimizer() -solution = ExaPF.optimize!(optimizer, nlp) - -``` - -## Notes -By default, the optimization routine solves a minimization -problem. - -""" -function optimize! 
end - -# MOI-based solver -function build!(optimizer::MOI.AbstractOptimizer, nlp::AbstractNLPEvaluator) - block_data = MOI.NLPBlockData(nlp) - u♭, u♯ = ExaPF.bounds(nlp, ExaPF.Variables()) - n = ExaPF.n_variables(nlp) - u = MOI.add_variables(optimizer, n) - # Set bounds - MOI.add_constraints( - optimizer, u, MOI.LessThan.(u♯), - ) - MOI.add_constraints( - optimizer, u, MOI.GreaterThan.(u♭), - ) - MOI.set(optimizer, MOI.NLPBlock(), block_data) - MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - return u -end - -function optimize!(optimizer::MOI.AbstractOptimizer, nlp::AbstractNLPEvaluator, x0) - u = build!(optimizer, nlp) - MOI.set(optimizer, MOI.VariablePrimalStart(), u, x0) - MOI.optimize!(optimizer) - x_opt = MOI.get(optimizer, MOI.VariablePrimal(), u) - solution = ( - status=MOI.get(optimizer, MOI.TerminationStatus()), - minimum=MOI.get(optimizer, MOI.ObjectiveValue()), - minimizer=x_opt, - ) - return solution -end -optimize!(optimizer, nlp::AbstractNLPEvaluator) = optimize!(optimizer, nlp, initial(nlp)) - diff --git a/src/Evaluators/penalty.jl b/src/Evaluators/penalty.jl deleted file mode 100644 index eb89d2f9..00000000 --- a/src/Evaluators/penalty.jl +++ /dev/null @@ -1,53 +0,0 @@ -abstract type AbstractPenaltyEvaluator <: AbstractNLPEvaluator end - -n_variables(ag::AbstractPenaltyEvaluator) = n_variables(ag.inner) -# All constraints moved inside the objective with penalties! -n_constraints(ag::AbstractPenaltyEvaluator) = 0 - -constraints_type(ag::AbstractPenaltyEvaluator) = :bound - -# Penalty evaluators don't have constraints so Lagrangian is objective -has_hessian_lagrangian(nlp::AbstractPenaltyEvaluator) = has_hessian(nlp) - -# Getters -get(ag::AbstractPenaltyEvaluator, attr::AbstractNLPAttribute) = get(ag.inner, attr) -get(ag::AbstractPenaltyEvaluator, attr::AbstractVariable) = get(ag.inner, attr) -get(ag::AbstractPenaltyEvaluator, attr::PS.AbstractNetworkAttribute) = get(ag.inner, attr) - -# Setters -function setvalues!(ag::AbstractPenaltyEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(ag.inner, attr, values) -end - -# Initial position -initial(ag::AbstractPenaltyEvaluator) = initial(ag.inner) - -# Bounds -bounds(ag::AbstractPenaltyEvaluator, ::Variables) = bounds(ag.inner, Variables()) -bounds(ag::AbstractPenaltyEvaluator, ::Constraints) = Float64[], Float64[] - -# based objective -objective_original(ag::AbstractPenaltyEvaluator, u) = objective(ag.inner, u) - -function constraint!(ag::AbstractPenaltyEvaluator, cons, u) - @assert length(cons) == 0 - return -end - -function jacobian!(ag::AbstractPenaltyEvaluator, jac, u) - @assert length(jac) == 0 - return -end - -jacobian_structure(ag::AbstractPenaltyEvaluator) = (Int[], Int[]) -function jacobian_structure!(ag::AbstractPenaltyEvaluator, rows, cols) - @assert length(rows) == length(cols) == 0 -end - -function hessian_structure(ag::AbstractPenaltyEvaluator) - return hessian_structure(ag.inner) -end - -primal_infeasibility!(ag::AbstractPenaltyEvaluator, cons, u) = primal_infeasibility!(ag.inner, cons, u) -primal_infeasibility(ag::AbstractPenaltyEvaluator, u) = primal_infeasibility(ag.inner, u) - diff --git a/src/Evaluators/proxal_evaluators.jl b/src/Evaluators/proxal_evaluators.jl deleted file mode 100644 index d03f6876..00000000 --- a/src/Evaluators/proxal_evaluators.jl +++ /dev/null @@ -1,341 +0,0 @@ - -@enum(ProxALTime, - Origin, - Final, - Normal, -) - -abstract type AbstractTimeStep end -struct Current <: AbstractTimeStep end -struct Previous <: AbstractTimeStep end -struct Next <: AbstractTimeStep end 
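# `ProxALTime` records where the subproblem sits in the time horizon: `Origin`
# (first period: no ramping link with a previous period, see `_gradient_slack!`),
# with `Final` and `Normal` covering the last and intermediate periods. The
# singleton tags `Current`, `Previous` and `Next` are only used for dispatch in the
# `update_multipliers!` / `update_primal!` methods below, which select the proximal
# data to overwrite (λf / λt and pg_f / pg_ref / pg_t).
#
# Illustrative sketch (the names pg_prev, pg_ref, λ_from, λ_to are placeholders):
# a decomposition loop would refresh the coupling data between two solves with
#
#     update_primal!(nlp, Previous(), pg_prev)      # generation of period t-1
#     update_primal!(nlp, Current(), pg_ref)        # proximal reference for period t
#     update_multipliers!(nlp, Current(), λ_from)   # multiplier of the link t-1 → t
#     update_multipliers!(nlp, Next(), λ_to)        # multiplier of the link t → t+1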
- - -""" - ProxALEvaluator{T, VI, VT, MT} <: AbstractNLPEvaluator - -Evaluator wrapping a `ReducedSpaceEvaluator` for use inside the -decomposition algorithm implemented in [ProxAL.jl](https://github.com/exanauts/ProxAL.jl). - -""" -mutable struct ProxALEvaluator{T, VI, VT, MT, Pullback, Hess} <: AbstractNLPEvaluator - inner::ReducedSpaceEvaluator{T, VI, VT, MT} - obj_stack::Pullback - hessian_obj::Hess - s_min::VT - s_max::VT - nu::Int - ng::Int - # Augmented penalties parameters - time::ProxALTime - scale_objective::T - τ::T - λf::VT - λt::VT - ρf::T - ρt::T - pg_f::VT - pg_ref::VT - pg_t::VT -end -function ProxALEvaluator( - nlp::ReducedSpaceEvaluator{T, VI, VT, MT}, - time::ProxALTime; - τ=0.1, ρf=0.1, ρt=0.1, scale_obj=1.0, want_hessian=true, -) where {T, VI, VT, MT} - nu = n_variables(nlp) - ng = get(nlp, PS.NumberOfGenerators()) - - s_min = xzeros(VT, ng) - s_max = xones(VT, ng) - λf = xzeros(VT, ng) - λt = xzeros(VT, ng) - - pgf = xzeros(VT, ng) - pgc = xzeros(VT, ng) - pgt = xzeros(VT, ng) - - pbm = pullback_ramping(nlp.model, nothing) - - hess = nothing - if want_hessian - hess = AutoDiff.Hessian(nlp.model, cost_penalty_ramping_constraints; tape=pbm) - end - return ProxALEvaluator( - nlp, pbm, hess, s_min, s_max, nu, ng, time, scale_obj, - τ, λf, λt, ρf, ρt, - pgf, pgc, pgt, - ) -end -function ProxALEvaluator( - pf::PS.PowerNetwork, - time::ProxALTime; - device=KA.CPU(), - options... -) - # Build network polar formulation - model = PolarForm(pf, device) - # Build reduced space evaluator - nlp = ReducedSpaceEvaluator(model; options...) - return ProxALEvaluator(nlp, time) -end -function ProxALEvaluator( - datafile::String; - time::ProxALTime=Normal, - device=KA.CPU(), - options... -) - nlp = ReducedSpaceEvaluator(datafile; device=device, options...) - return ProxALEvaluator(nlp, time; options...) 
-end - -n_variables(nlp::ProxALEvaluator) = nlp.nu + nlp.ng -n_constraints(nlp::ProxALEvaluator) = n_constraints(nlp.inner) - -constraints_type(::ProxALEvaluator) = :inequality -has_hessian(::ProxALEvaluator) = true -backend(nlp::ProxALEvaluator) = backend(nlp.inner) - -# Getters -get(nlp::ProxALEvaluator, attr::AbstractNLPAttribute) = get(nlp.inner, attr) -get(nlp::ProxALEvaluator, attr::AbstractVariable) = get(nlp.inner, attr) -get(nlp::ProxALEvaluator, attr::PS.AbstractNetworkValues) = get(nlp.inner, attr) -get(nlp::ProxALEvaluator, attr::PS.AbstractNetworkAttribute) = get(nlp.inner, attr) - -# Setters -function setvalues!(nlp::ProxALEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(nlp.inner, attr, values) -end -function transfer!(nlp::ProxALEvaluator, vm, va, pg, qg) - transfer!(nlp.inner, vm, va, pg, qg) -end - -# Initial position -function initial(nlp::ProxALEvaluator) - u0 = initial(nlp.inner) - s0 = copy(nlp.s_min) - return [u0; s0] -end - -# Bounds -function bounds(nlp::ProxALEvaluator, ::Variables) - u♭, u♯ = bounds(nlp.inner, Variables()) - return [u♭; nlp.s_min], [u♯; nlp.s_max] -end -bounds(nlp::ProxALEvaluator, ::Constraints) = bounds(nlp.inner, Constraints()) - -function update!(nlp::ProxALEvaluator, w) - u = @view w[1:nlp.nu] - s = @view w[nlp.nu+1:end] - conv = update!(nlp.inner, u) - pg = get(nlp.inner, PS.ActivePower()) - return conv -end - -## Update penalty terms -function update_multipliers!(nlp::ProxALEvaluator, ::Current, λt) - copyto!(nlp.λf, λt) -end -function update_multipliers!(nlp::ProxALEvaluator, ::Next, λt) - copyto!(nlp.λt, λt) -end - -function update_primal!(nlp::ProxALEvaluator, ::Previous, pgk) - copyto!(nlp.pg_f, pgk) -end -function update_primal!(nlp::ProxALEvaluator, ::Current, pgk) - copyto!(nlp.pg_ref, pgk) -end -function update_primal!(nlp::ProxALEvaluator, ::Next, pgk) - copyto!(nlp.pg_t, pgk) -end - -## Objective -function objective(nlp::ProxALEvaluator, w) - u = @view w[1:nlp.nu] - s = @view w[nlp.nu+1:end] - model = nlp.inner.model - buffer = get(nlp.inner, PhysicalState()) - return cost_penalty_ramping_constraints( - model, buffer, s, Int(nlp.time), - nlp.scale_objective, nlp.τ, nlp.λf, nlp.λt, nlp.ρf, nlp.ρt, nlp.pg_f, nlp.pg_ref, nlp.pg_t - ) -end - -## Gradient -function _gradient_control!(nlp::ProxALEvaluator, grad, w) - # Gradient wrt u - gu = @view grad[1:nlp.nu] - u = @view w[1:nlp.nu] - s = @view w[nlp.nu+1:end] - buffer = get(nlp.inner, PhysicalState()) - ∂obj = nlp.obj_stack - ju = ∂obj.stack.∇fᵤ ; jx = ∂obj.stack.∇fₓ - # Evaluate adjoint of cost function and update inplace AdjointStackObjective - adjoint_penalty_ramping_constraints!( - nlp.inner.model, ∂obj, buffer, s, Int(nlp.time), - nlp.scale_objective, nlp.τ, nlp.λf, nlp.λt, nlp.ρf, nlp.ρt, nlp.pg_f, nlp.pg_ref, nlp.pg_t - ) - reduced_gradient!(nlp.inner, gu, jx, ju, w) -end - -function _gradient_slack!(nlp::ProxALEvaluator, grad, w) - s = @view w[nlp.nu+1:end] - pg = get(nlp.inner, PS.ActivePower()) - # Gradient wrt s - gs = @view grad[nlp.nu+1:end] - if nlp.time != Origin - gs .= nlp.λf .+ nlp.ρf .* (nlp.pg_f .- pg .+ s) - else - gs .= 0.0 - end -end - -function gradient!(nlp::ProxALEvaluator, g, w) - _gradient_control!(nlp, g, w) - _gradient_slack!(nlp, g, w) - return nothing -end - -function constraint!(nlp::ProxALEvaluator, cons, w) - u = @view w[1:nlp.nu] - constraint!(nlp.inner, cons, u) -end - -function jacobian_structure(nlp::ProxALEvaluator) - m, n = n_constraints(nlp), n_variables(nlp) - nnzj = m * n - rows = zeros(Int, nnzj) - cols = zeros(Int, nnzj) - 
jacobian_structure!(nlp, rows, cols) - return rows, cols -end - -## Jacobian -function jacobian_structure!(nlp::ProxALEvaluator, rows, cols) - m, n = n_constraints(nlp), n_variables(nlp) - idx = 1 - for i in 1:n # number of variables - for c in 1:m #number of constraints - rows[idx] = c ; cols[idx] = i - idx += 1 - end - end -end - -function jacobian!(nlp::ProxALEvaluator, jac, w) - m = n_constraints(nlp) - u = @view w[1:nlp.nu] - Jᵤ = @view jac[:, 1:nlp.nu] - jacobian!(nlp.inner, Jᵤ, u) -end - -function jprod!(nlp::ProxALEvaluator, jv, w, v) - u = @view w[1:nlp.nu] - vu = @view v[1:nlp.nu] - jprod!(nlp.inner, jv, u, vu) -end - -## Transpose Jacobian-vector product -## ProxAL does not add any constraint to the reduced model -function jtprod!(nlp::ProxALEvaluator, jv, w, v) - u = @view w[1:nlp.nu] - jvu = @view jv[1:nlp.nu] - jtprod!(nlp.inner, jvu, u, v) -end - -function ojtprod!(nlp::ProxALEvaluator, jv, w, σ, v) - gu = @view jv[1:nlp.nu] - s = @view w[nlp.nu+1:end] - u = @view w[1:nlp.nu] - - # w.r.t. u - buffer = get(nlp.inner, PhysicalState()) - ∂obj = nlp.obj_stack - jvx = ∂obj.stack.jvₓ ; fill!(jvx, 0) - jvu = ∂obj.stack.jvᵤ ; fill!(jvu, 0) - - # Evaluate adjoint of cost function and update inplace AdjointStackObjective - adjoint_penalty_ramping_constraints!( - nlp.inner.model, ∂obj, buffer, s, Int(nlp.time), - nlp.scale_objective, nlp.τ, nlp.λf, nlp.λt, nlp.ρf, nlp.ρt, nlp.pg_f, nlp.pg_ref, nlp.pg_t - ) - copyto!(jvx, ∂obj.stack.∇fₓ) - copyto!(jvu, ∂obj.stack.∇fᵤ) - # compute gradient of objective - jvx .*= σ - jvu .*= σ - # compute transpose Jacobian vector product of constraints - full_jtprod!(nlp.inner, jvx, jvu, u, v) - # Evaluate gradient in reduced space - reduced_gradient!(nlp.inner, gu, jvx, jvu, w) - - # w.r.t. s - _gradient_slack!(nlp, jv, w) -end - -#= - For ProxAL, we have: - H = [ H_xx H_ux J_x' ] - [ H_xu H_uu J_u' ] - [ J_x J_u ρ I ] - - so, if `v = [v_x; v_u; v_s]`, we get - - H * v = [ H_xx v_x + H_ux v_u + J_x' v_s ] - [ H_xu v_x + H_uu v_u + J_u' v_s ] - [ J_x v_x + J_u v_u + ρ I ] - -=# -function hessprod!(nlp::ProxALEvaluator, hessvec, w, v) - @assert nlp.inner.has_hessian - @assert nlp.inner.has_jacobian - - model = nlp.inner.model - nx = get(model, NumberOfState()) - nu = get(model, NumberOfControl()) - - u = @view w[1:nlp.nu] - vᵤ = @view v[1:nlp.nu] - vₛ = @view v[1+nlp.nu:end] - - fill!(hessvec, 0.0) - - hvu = @view hessvec[1:nlp.nu] - hvs = @view hessvec[1+nlp.nu:end] - - ## OBJECTIVE HESSIAN - σ = 1.0 - hessprod!(nlp.inner, hvu, u, vᵤ) - - # Contribution of slack node - if nlp.time != Origin - hvs .+= nlp.ρf .* vₛ - # TODO: implement block corresponding to Jacobian - # and transpose-Jacobian - end - return -end - -## Utils function -function reset!(nlp::ProxALEvaluator) - reset!(nlp.inner) - # Reset multipliers - fill!(nlp.λf, 0) - fill!(nlp.λt, 0) - # Reset proximal centers - fill!(nlp.pg_f, 0) - fill!(nlp.pg_ref, 0) - fill!(nlp.pg_t, 0) -end - -function primal_infeasibility!(nlp::ProxALEvaluator, cons, w) - @assert length(w) == nlp.nu + nlp.ng - u = @view w[1:nlp.nu] - return primal_infeasibility!(nlp.inner, cons, u) -end -function primal_infeasibility(nlp::ProxALEvaluator, w) - @assert length(w) == nlp.nu + nlp.ng - u = @view w[1:nlp.nu] - return primal_infeasibility(nlp.inner, u) -end diff --git a/src/Evaluators/reduced_evaluator.jl b/src/Evaluators/reduced_evaluator.jl deleted file mode 100644 index 0d180ac6..00000000 --- a/src/Evaluators/reduced_evaluator.jl +++ /dev/null @@ -1,694 +0,0 @@ - -""" - ReducedSpaceEvaluator{T, VI, VT, MT, Jacx, Jacu, 
JacCons, Hess} <: AbstractNLPEvaluator - -Reduced-space evaluator projecting the optimization problem -into the powerflow manifold defined by the nonlinear equation ``g(x, u) = 0``. -The state ``x`` is defined implicitly, as a function of the control -``u``. Hence, the powerflow equation is implicitly satisfied -when we are using this evaluator. - -Once a new point `u` is passed to the evaluator, -the user needs to call the method `update!` to find the corresponding -state ``x(u)`` satisfying the balance equation ``g(x(u), u) = 0``. - -Taking as input a [`PolarForm`](@ref) structure, the reduced evaluator -builds the bounds corresponding to the control `u`, -The reduced evaluator could be instantiated on the host memory, or on a specific device -(currently, only CUDA is supported). - -## Examples - -```julia-repl -julia> datafile = "case9.m" # specify a path to a MATPOWER instance -julia> nlp = ReducedSpaceEvaluator(datafile) -A ReducedSpaceEvaluator object - * device: KernelAbstractions.CPU() - * #vars: 5 - * #cons: 10 - * constraints: - - voltage_magnitude_constraints - - active_power_constraints - - reactive_power_constraints - * linear solver: ExaPF.LinearSolvers.DirectSolver() -``` - -If a GPU is available, we could instantiate `nlp` as - -```julia-repl -julia> nlp_gpu = ReducedSpaceEvaluator(datafile; device=CUDADevice()) -A ReducedSpaceEvaluator object - * device: KernelAbstractions.CUDADevice() - * #vars: 5 - * #cons: 10 - * constraints: - - voltage_magnitude_constraints - - active_power_constraints - - reactive_power_constraints - * linear solver: ExaPF.LinearSolvers.DirectSolver() - -``` - -## Note -Mathematically, we set apart the state ``x`` from the control ``u``, -and use a third variable ``y`` --- the by-product --- to denote the remaining -values of the network. -In the implementation of `ReducedSpaceEvaluator`, -we only deal with a control `u` and an attribute `buffer`, -storing all the physical values needed to describe the network. -The attribute `buffer` stores the values of the control `u`, the state `x` -and the by-product `y`. Each time we are calling the method `update!`, -the values of the control are copied into the buffer. 
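Derivatives are returned in the reduced space through an adjoint computation
(see `reduced_gradient!` below): with ``λ`` solving the adjoint system
``(∇_x g)^⊤ λ = ∇_x f``, the reduced gradient reads
```math
\dfrac{d f}{d u} = ∇_u f - (∇_u g)^⊤ λ .
```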
- -""" -mutable struct ReducedSpaceEvaluator{T, VI, VT, MT, Jacx, Jacu, JacCons, HessLag} <: AbstractNLPEvaluator - model::PolarForm{T, VI, VT, MT} - λ::VT - - u_min::VT - u_max::VT - - constraints::Vector{Function} - g_min::VT - g_max::VT - - # Cache - buffer::PolarNetworkState{VI, VT} - # AutoDiff - state_jacobian::FullSpaceJacobian{Jacx, Jacu} - obj_stack::AutoDiff.TapeMemory{typeof(cost_production), AdjointStackObjective{VT}, Nothing} - cons_stacks::Vector{AutoDiff.TapeMemory} # / constraints - constraint_jacobians::JacCons - hesslag::HessLag - - # Options - linear_solver::LinearSolvers.AbstractLinearSolver - powerflow_solver::AbstractNonLinearSolver - has_jacobian::Bool - update_jacobian::Bool - has_hessian::Bool -end - -function ReducedSpaceEvaluator( - model::PolarForm{T, VI, VT, MT}; - constraints=Function[voltage_magnitude_constraints, active_power_constraints, reactive_power_constraints], - linear_solver=nothing, - powerflow_solver=NewtonRaphson(tol=1e-12), - want_jacobian=true, - nbatch_hessian=1, -) where {T, VI, VT, MT} - # First, build up a network buffer - buffer = get(model, PhysicalState()) - # Populate buffer with default values of the network, as stored - # inside model - init_buffer!(model, buffer) - - u_min, u_max = bounds(model, Control()) - λ = similar(buffer.dx) - - m = sum([size_constraint(model, cons) for cons in constraints]) - g_min = VT(undef, m) - g_max = VT(undef, m) - - shift = 1 - for cons in constraints - cb, cu = bounds(model, cons) - m = size_constraint(model, cons) - copyto!(g_min, shift, cb, 1, m) - copyto!(g_max, shift, cu, 1, m) - shift += m - end - - SpMT = isa(model.device, CPU) ? SparseMatrixCSC : CUSPARSE.CuSparseMatrixCSR - # Build Linear Algebra - J = powerflow_jacobian(model) |> SpMT - _linear_solver = isnothing(linear_solver) ? DirectSolver(J) : linear_solver - - obj_ad = pullback_objective(model) - state_ad = FullSpaceJacobian(model, power_balance) - cons_ad = AutoDiff.TapeMemory[] - for cons in constraints - push!(cons_ad, AutoDiff.TapeMemory(model, cons, VT)) - end - - # Jacobians - cons_jac = nothing - if want_jacobian - cons_jac = ConstraintsJacobianStorage(model, constraints) - end - - # Hessians - want_hessian = (nbatch_hessian > 0) - hess_ad = nothing - if want_hessian - hess_ad = if nbatch_hessian > 1 - BatchHessianLagrangian(model, J, nbatch_hessian) - else - HessianLagrangian(model, J) - end - end - - return ReducedSpaceEvaluator( - model, λ, u_min, u_max, - constraints, g_min, g_max, - buffer, - state_ad, obj_ad, cons_ad, cons_jac, hess_ad, - _linear_solver, powerflow_solver, want_jacobian, false, want_hessian, - ) -end -function ReducedSpaceEvaluator(datafile::String; device=KA.CPU(), options...) - return ReducedSpaceEvaluator(PolarForm(datafile, device); options...) -end - -array_type(nlp::ReducedSpaceEvaluator) = array_type(nlp.model) -backend(nlp::ReducedSpaceEvaluator) = nlp.model - -n_variables(nlp::ReducedSpaceEvaluator) = length(nlp.u_min) -n_constraints(nlp::ReducedSpaceEvaluator) = length(nlp.g_min) - -constraints_type(::ReducedSpaceEvaluator) = :inequality -has_hessian(nlp::ReducedSpaceEvaluator) = nlp.has_hessian -number_batches_hessian(nlp::ReducedSpaceEvaluator) = nlp.has_hessian ? 
n_batches(nlp.hesslag) : 0 - -adjoint_jacobian(nlp::ReducedSpaceEvaluator, ::State) = nlp.state_jacobian.x.J -adjoint_jacobian(nlp::ReducedSpaceEvaluator, ::Control) = nlp.state_jacobian.u.J - -# Getters -get(nlp::ReducedSpaceEvaluator, ::Constraints) = nlp.constraints -function get(nlp::ReducedSpaceEvaluator, ::State) - x = similar(nlp.λ) ; fill!(x, 0) - get!(nlp.model, State(), x, nlp.buffer) - return x -end -get(nlp::ReducedSpaceEvaluator, ::PhysicalState) = nlp.buffer - -# Physics -get(nlp::ReducedSpaceEvaluator, ::PS.VoltageMagnitude) = nlp.buffer.vmag -get(nlp::ReducedSpaceEvaluator, ::PS.VoltageAngle) = nlp.buffer.vang -get(nlp::ReducedSpaceEvaluator, ::PS.ActivePower) = nlp.buffer.pgen -get(nlp::ReducedSpaceEvaluator, ::PS.ReactivePower) = nlp.buffer.qgen -function get(nlp::ReducedSpaceEvaluator, attr::PS.AbstractNetworkAttribute) - return get(nlp.model, attr) -end - -# Setters -function setvalues!(nlp::ReducedSpaceEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(nlp.model, attr, values) -end -function setvalues!(nlp::ReducedSpaceEvaluator, attr::PS.ActiveLoad, values) - setvalues!(nlp.buffer, attr, values) -end -function setvalues!(nlp::ReducedSpaceEvaluator, attr::PS.ReactiveLoad, values) - setvalues!(nlp.buffer, attr, values) -end - -# Transfer network values inside buffer -function transfer!( - nlp::ReducedSpaceEvaluator, vm, va, pg, qg, -) - setvalues!(nlp.buffer, PS.VoltageMagnitude(), vm) - setvalues!(nlp.buffer, PS.VoltageAngle(), va) - setvalues!(nlp.buffer, PS.ActivePower(), pg) - setvalues!(nlp.buffer, PS.ReactivePower(), qg) -end - -# Initial position -function initial(nlp::ReducedSpaceEvaluator) - u = similar(nlp.u_min) ; fill!(u, 0.0) - return get!(nlp.model, Control(), u, nlp.buffer) -end - -# Bounds -bounds(nlp::ReducedSpaceEvaluator, ::Variables) = (nlp.u_min, nlp.u_max) -bounds(nlp::ReducedSpaceEvaluator, ::Constraints) = (nlp.g_min, nlp.g_max) - -## Callbacks -function update!(nlp::ReducedSpaceEvaluator, u) - jac_x = nlp.state_jacobian.x - # Transfer control u into the network cache - transfer!(nlp.model, nlp.buffer, u) - # Get corresponding point on the manifold - conv = powerflow( - nlp.model, - jac_x, - nlp.buffer, - nlp.powerflow_solver; - linear_solver=nlp.linear_solver - ) - - if !conv.has_converged - error("Newton-Raphson algorithm failed to converge ($(conv.norm_residuals))") - return conv - end - - # Evaluate Jacobian of power flow equation on current u - AutoDiff.jacobian!(nlp.model, nlp.state_jacobian.u, nlp.buffer) - # Specify that constraint's Jacobian is not up to date - nlp.update_jacobian = nlp.has_jacobian - # Update Hessian factorization - if !isnothing(nlp.hesslag) - ∇gₓ = nlp.state_jacobian.x.J - update_factorization!(nlp.hesslag, ∇gₓ) - # Update values for Hessian's AutoDiff - update_hessian!(nlp.model, nlp.hesslag.hess, nlp.buffer) - end - return conv -end - -# TODO: determine if we should include λ' * g(x, u), even if ≈ 0 -function objective(nlp::ReducedSpaceEvaluator, u) - # Take as input the current cache, updated previously in `update!`. 
- return cost_production(nlp.model, nlp.buffer) -end - -function constraint!(nlp::ReducedSpaceEvaluator, g, u) - ϕ = nlp.buffer - mf = 1::Int - mt = 0::Int - for cons in nlp.constraints - m_ = size_constraint(nlp.model, cons)::Int - mt += m_ - cons_ = @view(g[mf:mt]) - cons(nlp.model, cons_, ϕ) - mf += m_ - end -end - -function _backward_solve!(nlp::ReducedSpaceEvaluator, y, x) - ∇gₓ = nlp.state_jacobian.x.J - if isa(nlp.linear_solver, LinearSolvers.AbstractIterativeLinearSolver) - # Iterative solver case - ∇gT = LinearSolvers.get_transpose(nlp.linear_solver, ∇gₓ) - # Switch preconditioner to transpose mode - LinearSolvers.update!(nlp.linear_solver, ∇gT) - # Compute adjoint and store value inside λₖ - LinearSolvers.ldiv!(nlp.linear_solver, y, ∇gT, x) - elseif isa(y, CUDA.CuArray) - ∇gT = LinearSolvers.get_transpose(nlp.linear_solver, ∇gₓ) - LinearSolvers.ldiv!(nlp.linear_solver, y, ∇gT, x) - else - LinearSolvers.rdiv!(nlp.linear_solver, y, x) - end -end - -### -# First-order code -#### -# -# compute inplace reduced gradient (g = ∇fᵤ + (∇gᵤ')*λ) -# equivalent to: g = ∇fᵤ - (∇gᵤ')*λ_neg -# (take λₖ_neg to avoid computing an intermediate array) -function reduced_gradient!( - nlp::ReducedSpaceEvaluator, grad, ∂fₓ, ∂fᵤ, λ, u, -) - ∇gᵤ = nlp.state_jacobian.u.J - ∇gₓ = nlp.state_jacobian.x.J - - # λ = ∇gₓ' \ ∂fₓ - _backward_solve!(nlp, λ, ∂fₓ) - - grad .= ∂fᵤ - mul!(grad, transpose(∇gᵤ), λ, -1.0, 1.0) - return -end -function reduced_gradient!(nlp::ReducedSpaceEvaluator, grad, ∂fₓ, ∂fᵤ, u) - reduced_gradient!(nlp::ReducedSpaceEvaluator, grad, ∂fₓ, ∂fᵤ, nlp.λ, u) -end - -# Compute only full gradient wrt x and u -function full_gradient!(nlp::ReducedSpaceEvaluator, gx, gu, u) - buffer = nlp.buffer - ∂obj = nlp.obj_stack - # Evaluate adjoint of cost function and update inplace AdjointStackObjective - gradient_objective!(nlp.model, ∂obj, buffer) - copyto!(gx, ∂obj.stack.∇fₓ) - copyto!(gu, ∂obj.stack.∇fᵤ) -end - -function gradient!(nlp::ReducedSpaceEvaluator, g, u) - buffer = nlp.buffer - # Evaluate adjoint of cost function and update inplace AdjointStackObjective - gradient_objective!(nlp.model, nlp.obj_stack, buffer) - ∇fₓ, ∇fᵤ = nlp.obj_stack.stack.∇fₓ, nlp.obj_stack.stack.∇fᵤ - - reduced_gradient!(nlp, g, ∇fₓ, ∇fᵤ, u) - return -end - -function jacobian_structure(nlp::ReducedSpaceEvaluator) - m, n = n_constraints(nlp), n_variables(nlp) - nnzj = m * n - rows = zeros(Int, nnzj) - cols = zeros(Int, nnzj) - jacobian_structure!(nlp, rows, cols) - return rows, cols -end - -function jacobian_structure!(nlp::ReducedSpaceEvaluator, rows, cols) - m, n = n_constraints(nlp), n_variables(nlp) - idx = 1 - for i in 1:n # number of variables - for c in 1:m #number of constraints - rows[idx] = c ; cols[idx] = i - idx += 1 - end - end -end - -function _update_full_jacobian_constraints!(nlp) - if nlp.update_jacobian - update_full_jacobian!(nlp.model, nlp.constraint_jacobians, nlp.buffer) - nlp.update_jacobian = false - end -end - -function jprod!(nlp::ReducedSpaceEvaluator, jm, u, v) - nᵤ = length(u) - m = n_constraints(nlp) - @assert nᵤ == size(v, 1) - - _update_full_jacobian_constraints!(nlp) - H = nlp.hesslag - ∇gᵤ = nlp.state_jacobian.u.J - - # Arrays - Jx = nlp.constraint_jacobians.Jx - Ju = nlp.constraint_jacobians.Ju - z = H.z - - # init RHS - mul!(z, ∇gᵤ, v) - LinearAlgebra.ldiv!(H.lu, z) - - # jv .= Ju * v .- Jx * z - mul!(jm, Ju, v) - mul!(jm, Jx, z, -1.0, 1.0) - return -end - -function full_jtprod!(nlp::ReducedSpaceEvaluator, jvx, jvu, u, v) - fr_ = 0::Int - for (cons, stack) in zip(nlp.constraints, 
nlp.cons_stacks) - n = size_constraint(nlp.model, cons)::Int - mask = fr_+1:fr_+n - vv = @view v[mask] - # Compute jtprod of current constraint - jacobian_transpose_product!(nlp.model, stack, nlp.buffer, vv) - jvx .+= stack.stack.∂x - jvu .+= stack.stack.∂u - fr_ += n - end -end - -function jtprod!(nlp::ReducedSpaceEvaluator, jv, u, v) - ∂obj = nlp.obj_stack - μ = nlp.buffer.balance - jvx = ∂obj.stack.jvₓ ; fill!(jvx, 0) - jvu = ∂obj.stack.jvᵤ ; fill!(jvu, 0) - full_jtprod!(nlp, jvx, jvu, u, v) - reduced_gradient!(nlp, jv, jvx, jvu, μ, u) -end - -function ojtprod!(nlp::ReducedSpaceEvaluator, jv, u, σ, v) - ∂obj = nlp.obj_stack - jvx = ∂obj.stack.jvₓ ; fill!(jvx, 0) - jvu = ∂obj.stack.jvᵤ ; fill!(jvu, 0) - # compute gradient of objective - full_gradient!(nlp, jvx, jvu, u) - jvx .*= σ - jvu .*= σ - # compute transpose Jacobian vector product of constraints - full_jtprod!(nlp, jvx, jvu, u, v) - # Evaluate gradient in reduced space - reduced_gradient!(nlp, jv, jvx, jvu, u) - return -end - -### -# Second-order code -#### -# Single version -function full_hessprod!(nlp::ReducedSpaceEvaluator, hv::AbstractVector, y::AbstractVector, tgt::AbstractVector) - nx, nu = get(nlp.model, NumberOfState()), get(nlp.model, NumberOfControl()) - H = nlp.hesslag - AutoDiff.adj_hessian_prod!(nlp.model, H.hess, hv, nlp.buffer, y, tgt) - ∂fₓ = @view hv[1:nx] - ∂fᵤ = @view hv[nx+1:nx+nu] - return ∂fₓ , ∂fᵤ -end - -# Batch version -function full_hessprod!(nlp::ReducedSpaceEvaluator, hv::AbstractMatrix, y::AbstractMatrix, tgt::AbstractMatrix) - nx, nu = get(nlp.model, NumberOfState()), get(nlp.model, NumberOfControl()) - H = nlp.hesslag - batch_adj_hessian_prod!(nlp.model, H.hess, hv, nlp.buffer, y, tgt) - ∂fₓ = hv[1:nx, :] - ∂fᵤ = hv[nx+1:nx+nu, :] - return ∂fₓ , ∂fᵤ -end - -function hessprod!(nlp::ReducedSpaceEvaluator, hessvec, u, w) - @assert nlp.hesslag != nothing - - nx = get(nlp.model, NumberOfState()) - nu = get(nlp.model, NumberOfControl()) - H = nlp.hesslag - ∇gᵤ = nlp.state_jacobian.u.J - - # Number of batches - nbatch = size(w, 2) - @assert nbatch == size(H.z, 2) == size(hessvec, 2) - - # Load variables and buffers - tgt = H.tmp_tgt - hv = H.tmp_hv - y = H.y - z = H.z - ψ = H.ψ - - # Step 1: computation of first second-order adjoint - mul!(z, ∇gᵤ, w, -1.0, 0.0) - LinearAlgebra.ldiv!(H.lu, z) - - # Init tangent with z and w - for i in 1:nbatch - mxu = 1 + (i-1)*(nx+nu) - mx = 1 + (i-1)*nx - mu = 1 + (i-1)*nu - copyto!(tgt, mxu, z, mx, nx) - copyto!(tgt, mxu+nx, w, mu, nu) - end - - # Init adjoint - fill!(y, 0.0) - y[end:end] .= 1.0 # / objective - y[1:nx] .-= nlp.λ # / power balance - - # STEP 2: AutoDiff - ∂fₓ, ∂fᵤ = full_hessprod!(nlp, hv, y, tgt) - - # STEP 3: computation of second second-order adjoint - copyto!(ψ, ∂fₓ) - LinearAlgebra.ldiv!(H.adjlu, ψ) - - hessvec .= ∂fᵤ - mul!(hessvec, transpose(∇gᵤ), ψ, -1.0, 1.0) - - return -end - -function hessian_lagrangian_penalty_prod!( - nlp::ReducedSpaceEvaluator, hessvec, u, y, σ, D, w, -) - @assert nlp.hesslag != nothing - - nbatch = size(w, 2) - nx = get(nlp.model, NumberOfState()) - nu = get(nlp.model, NumberOfControl()) - buffer = nlp.buffer - H = nlp.hesslag - ∇gᵤ = nlp.state_jacobian.u.J - - fill!(hessvec, 0.0) - - z = H.z - ψ = H.ψ - ∇gᵤ = nlp.state_jacobian.u.J - mul!(z, ∇gᵤ, w, -1.0, 0.0) - LinearAlgebra.ldiv!(H.lu, z) - - # Two vector products - μ = H.y - tgt = H.tmp_tgt - hv = H.tmp_hv - - # Init tangent with z and w - for i in 1:nbatch - mxu = 1 + (i-1)*(nx+nu) - mx = 1 + (i-1)*nx - mu = 1 + (i-1)*nu - copyto!(tgt, mxu, z, mx, nx) - copyto!(tgt, 
mxu+nx, w, mu, nu) - end - - ## OBJECTIVE HESSIAN - fill!(μ, 0.0) - μ[1:nx] .-= nlp.λ # / power balance - μ[end:end] .= σ # / objective - # / constraints - shift_m = nx - shift_y = size_constraint(nlp.model, voltage_magnitude_constraints) - for cons in nlp.constraints - isa(cons, typeof(voltage_magnitude_constraints)) && continue - m = size_constraint(nlp.model, cons)::Int - μ[shift_m+1:m+shift_m] .= view(y, shift_y+1:shift_y+m) - shift_m += m - shift_y += m - end - - ∇²Lx, ∇²Lu = full_hessprod!(nlp, hv, μ, tgt) - - # Add Hessian of quadratic penalty - m = length(y) - diagjac = (nbatch > 1) ? similar(y, m, nbatch) : similar(y) - _update_full_jacobian_constraints!(nlp) - Jx = nlp.constraint_jacobians.Jx - Ju = nlp.constraint_jacobians.Ju - # ∇²Lx .+= Jx' * (D * (Jx * z)) .+ Jx' * (D * (Ju * w)) - # ∇²Lu .+= Ju' * (D * (Jx * z)) .+ Ju' * (D * (Ju * w)) - mul!(diagjac, Jx, z) - mul!(diagjac, Ju, w, 1.0, 1.0) - diagjac .*= D - mul!(∇²Lx, Jx', diagjac, 1.0, 1.0) - mul!(∇²Lu, Ju', diagjac, 1.0, 1.0) - - # Second order adjoint - copyto!(ψ, ∇²Lx) - LinearAlgebra.ldiv!(H.adjlu, ψ) - - hessvec .+= ∇²Lu - mul!(hessvec, transpose(∇gᵤ), ψ, -1.0, 1.0) - - return -end - -# Batch Hessian -macro define_batch_hessian(function_name, target_function, args...) - fname_dispatch = Symbol("_" * String(function_name)) - fname = Symbol(function_name) - argstup = Tuple(args) - quote - function $(esc(fname_dispatch))(nlp::ReducedSpaceEvaluator, hesslag::BatchHessianLagrangian, dest, $(map(esc, argstup)...)) - @assert has_hessian(nlp) - @assert n_batches(hesslag) > 1 - n = ExaPF.n_variables(nlp) - ∇²f = hesslag.hess - nbatch = size(hesslag.tmp_hv, 2) - - # Allocate memory - v_cpu = zeros(n, nbatch) - v = similar(x, n, nbatch) - - N = div(n, nbatch, RoundDown) - for i in 1:N - # Init tangents on CPU - fill!(v_cpu, 0.0) - @inbounds for j in 1:nbatch - v_cpu[j+(i-1)*nbatch, j] = 1.0 - end - # Pass tangents to the device - copyto!(v, v_cpu) - - hm = @view dest[:, nbatch * (i-1) + 1: nbatch * i] - $target_function(nlp, hm, $(map(esc, argstup)...), v) - end - - # Last slice - last_batch = n - N*nbatch - if last_batch > 0 - fill!(v_cpu, 0.0) - @inbounds for j in 1:nbatch - v_cpu[n-nbatch+j, j] = 1.0 - end - copyto!(v, v_cpu) - - hm = @view dest[:, (n - nbatch + 1) : n] - $target_function(nlp, hm, $(map(esc, argstup)...), v) - end - end - function $(esc(fname_dispatch))(nlp::ReducedSpaceEvaluator, hesslag::HessianLagrangian, dest, $(map(esc, argstup)...)) - @assert has_hessian(nlp) - n = n_variables(nlp) - v_cpu = zeros(n) - v = similar(x) - @inbounds for i in 1:n - hv = @view dest[:, i] - fill!(v_cpu, 0) - v_cpu[i] = 1.0 - copyto!(v, v_cpu) - $target_function(nlp, hv, $(map(esc, argstup)...), v) - end - end - $(esc(fname))(nlp::ReducedSpaceEvaluator, dest, $(map(esc, argstup)...)) = $(esc(fname_dispatch))(nlp, nlp.hesslag, dest, $(map(esc, argstup)...)) - end -end - -@define_batch_hessian hessian! hessprod! x -@define_batch_hessian hessian_lagrangian_penalty! hessian_lagrangian_penalty_prod! x y σ D -@define_batch_hessian jacobian! jprod! 
x - - -# Return lower-triangular matrix -function hessian_structure(nlp::ReducedSpaceEvaluator) - n = n_variables(nlp) - rows = Int[r for r in 1:n for c in 1:r] - cols = Int[c for r in 1:n for c in 1:r] - return rows, cols -end - -# Utils function -function primal_infeasibility!(nlp::ReducedSpaceEvaluator, cons, u) - constraint!(nlp, cons, u) # Evaluate constraints - (n_inf, err_inf, n_sup, err_sup) = _check(cons, nlp.g_min, nlp.g_max) - return max(err_inf, err_sup) -end -function primal_infeasibility(nlp::ReducedSpaceEvaluator, u) - cons = similar(nlp.g_min) ; fill!(cons, 0) - return primal_infeasibility!(nlp, cons, u) -end - -# Printing -function sanity_check(nlp::ReducedSpaceEvaluator, u, cons) - println("Check violation of constraints") - print("Control \t") - (n_inf, err_inf, n_sup, err_sup) = _check(u, nlp.u_min, nlp.u_max) - @printf("UB: %.4e (%d) LB: %.4e (%d)\n", - err_sup, n_sup, err_inf, n_inf) - print("Constraints\t") - (n_inf, err_inf, n_sup, err_sup) = _check(cons, nlp.g_min, nlp.g_max) - @printf("UB: %.4e (%d) LB: %.4e (%d)\n", - err_sup, n_sup, err_inf, n_inf) -end - -function Base.show(io::IO, nlp::ReducedSpaceEvaluator) - n = n_variables(nlp) - m = n_constraints(nlp) - println(io, "A ReducedSpaceEvaluator object") - println(io, " * device: ", nlp.model.device) - println(io, " * #vars: ", n) - println(io, " * #cons: ", m) - println(io, " * constraints:") - for cons in nlp.constraints - println(io, " - ", cons) - end - print(io, " * linear solver: ", typeof(nlp.linear_solver)) -end - -function reset!(nlp::ReducedSpaceEvaluator) - # Reset adjoint - fill!(nlp.λ, 0) - # Reset buffer - init_buffer!(nlp.model, nlp.buffer) - return -end - diff --git a/src/Evaluators/slack_evaluator.jl b/src/Evaluators/slack_evaluator.jl deleted file mode 100644 index 46f3e5cf..00000000 --- a/src/Evaluators/slack_evaluator.jl +++ /dev/null @@ -1,279 +0,0 @@ - -@doc raw""" - SlackEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT} <: AbstractNLPEvaluator - -Reformulate a problem with inequality constraints as an -equality constrained problem, by introducing a set of slack -variables. - -### Description -A `SlackEvaluator` takes as input an original [`AbstractNLPEvaluator`](@ref), -subject to inequality constraints -```math -\begin{aligned} - \min_{u \in \mathbb{R}^n} \quad & f(u)\\ -\mathrm{s.t.} \quad & h^♭ ≤ h(u) ≤ h^♯,\\ - & u^♭ ≤ u ≤ u^♯. -\end{aligned} -``` -The `SlackEvaluator` instance rewrites this problem with inequalities -as a new problem comprising only *equality constraints*, by introducing -$m$ slack variables $s_1, ⋯, s_m$. The new problem writes out -```math -\begin{aligned} - \min_{u \in \mathbb{R}^n, s \in \mathbb{R}^m} \quad & f(u)\\ - \mathrm{s.t.} \quad & h(u) - s = 0 \\ - & u^♭ ≤ u ≤ u^♯, \\ - & h^♭ ≤ s ≤ h^♯. 
-\end{aligned} -``` - -### Attributes - -- `inner::Evaluator`: original evaluator -- `s_min::VT`: stores lower bounds for slack variables -- `s_max::VT`: stores upper bounds for slack variables -- `nv::Int`: number of original variables -- `ns::Int`: number of slack variables - -""" -mutable struct SlackEvaluator{Evaluator<:AbstractNLPEvaluator, T, VT, MT} <: AbstractNLPEvaluator - inner::Evaluator - s_min::VT - s_max::VT - nv::Int - ns::Int - J::MT # Jacobian buffer - H::MT # Hessian buffer -end -function SlackEvaluator(nlp::AbstractNLPEvaluator) - if !is_constrained(nlp) - error("Input problem must have inequality constraints") - end - nv, ns = n_variables(nlp), n_constraints(nlp) - s_min, s_max = bounds(nlp, Constraints()) - if has_hessian(nlp) - H = similar(s_min, nv, nv) - J = similar(s_min, ns, nv) - else - H = nothing - J = nothing - end - return SlackEvaluator{typeof(nlp), eltype(s_min), typeof(s_min), typeof(J)}(nlp, s_min, s_max, nv, ns, J, H) -end -function SlackEvaluator( - datafile::String; - device=KA.CPU(), options... -) - nlp = ReducedSpaceEvaluator(datafile; device=device, options...) - return SlackEvaluator(nlp) -end - -n_variables(nlp::SlackEvaluator) = nlp.nv + nlp.ns -n_constraints(nlp::SlackEvaluator) = n_constraints(nlp.inner) - -constraints_type(::SlackEvaluator) = :equality -has_hessian(nlp::SlackEvaluator) = has_hessian(nlp.inner) -backend(nlp::SlackEvaluator) = backend(nlp.inner) - -# Getters -get(nlp::SlackEvaluator, attr::AbstractNLPAttribute) = get(nlp.inner, attr) -get(nlp::SlackEvaluator, attr::AbstractVariable) = get(nlp.inner, attr) -get(nlp::SlackEvaluator, attr::PS.AbstractNetworkAttribute) = get(nlp.inner, attr) - -# Setters -function setvalues!(nlp::SlackEvaluator, attr::PS.AbstractNetworkValues, values) - setvalues!(nlp.inner, attr, values) -end - -# Bounds -function bounds(nlp::SlackEvaluator{Ev, T, VT}, ::Variables) where {Ev, T, VT} - u♭, u♯ = bounds(nlp.inner, Variables()) - return [u♭; nlp.s_min] |> VT , [u♯; nlp.s_max] |> VT -end -function bounds(nlp::SlackEvaluator{Ev, T, VT}, ::Constraints) where {Ev, T, VT} - return (VT(zeros(nlp.ns)), VT(zeros(nlp.ns))) -end - -function initial(nlp::SlackEvaluator{Ev, T, VT}) where {Ev, T, VT} - u0 = initial(nlp.inner) - update!(nlp.inner, u0) - cons = constraint(nlp.inner, u0) - return [u0; -cons] |> VT -end - -function update!(nlp::SlackEvaluator, w) - u = @view w[1:nlp.nv] - return update!(nlp.inner, u) -end - -# f(x) = f₀(u) , with x = (u, s) -function objective(nlp::SlackEvaluator, w) - u = @view w[1:nlp.nv] - return objective(nlp.inner, u) -end - -# h(x) = h₀(u) - s -function constraint!(nlp::SlackEvaluator, cons, w) - u = @view w[1:nlp.nv] - s = @view w[nlp.nv+1:end] - constraint!(nlp.inner, cons, u) - cons .-= s - return -end - -# Gradient -# ∇f = [ ∇f₀ ; 0 ] -function gradient!(nlp::SlackEvaluator, grad, w) - # w.r.t. u - u = @view w[1:nlp.nv] - gu = @view grad[1:nlp.nv] - gradient!(nlp.inner, gu, u) - # w.r.t. s - gs = @view grad[nlp.nv+1:end] - fill!(gs, 0.0) - return -end - -## Transpose Jacobian-vector product -# N.B.: constraints are specified as h(u) - s = 0 -# J = [J₀ -I], so -# J' = [ J₀' ] -# [ -I ] -function jtprod!(nlp::SlackEvaluator, jv, w, v) - # w.r.t. u - u = @view w[1:nlp.nv] - jvu = @view jv[1:nlp.nv] - jtprod!(nlp.inner, jvu, u, v) - # w.r.t. s - jvs = @view jv[nlp.nv+1:end] - jvs .= -v - return -end - -function jacobian!(nlp::SlackEvaluator, jac, w) - fill!(jac, 0) - u = @view w[1:nlp.nv] - # w.r.t. u - Jᵤ = @view jac[:, 1:nlp.nv] - jacobian!(nlp.inner, Jᵤ, u) - # w.r.t. 
s - Jₛ = @view jac[:, nlp.nv+1:end] - ind = diagind(Jₛ) # extract diagonal - Jₛ[ind] .= -1.0 -end - -function ojtprod!(nlp::SlackEvaluator, jv, w, σ, v) - # w.r.t. u - u = @view w[1:nlp.nv] - jvu = @view jv[1:nlp.nv] - ojtprod!(nlp.inner, jvu, u, σ, v) - # w.r.t. s - jvs = @view jv[nlp.nv+1:end] - jvs .= -v - return -end - -# H = [ H₀ 0 ] -# [ 0 0 ] -function hessprod!(nlp::SlackEvaluator, hessvec, w, v) - # w.r.t. u - @views hessprod!(nlp.inner, hessvec[1:nlp.nv], w[1:nlp.nv], v[1:nlp.nv]) - # w.r.t. s - hus = @view hessvec[nlp.nv+1:end] - hus .= 0.0 - return -end - -# J = [Jᵤ -I] , hence -# J' * J = [ Jᵤ' * Jᵤ - Jᵤ'] -# [ - Jᵤ I ] -function hessian_lagrangian_penalty_prod!( - nlp::SlackEvaluator, hessvec, x, y, σ, ρ, v, -) - @views begin - u = x[1:nlp.nv] - vᵤ = v[1:nlp.nv] - vₛ = v[nlp.nv+1:end] - hvu = hessvec[1:nlp.nv] - hvs = hessvec[nlp.nv+1:end] - end - fill!(hessvec, 0) - u_buf = similar(u) ; fill!(u_buf, 0) - y_buf = similar(y) ; fill!(y_buf, 0) - # w.r.t. uu - # ∇²L + ρ Jᵤ' * Jᵤ - hessian_lagrangian_penalty_prod!(nlp.inner, hvu, u, y, σ, ρ, vᵤ) - - if !iszero(ρ) - # w.r.t. us - y_buf .= ρ .* vₛ - # - Jᵤ' * vₛ - jtprod!(nlp.inner, u_buf, u, y_buf) - hvu .-= u_buf - # w.r.t. su - jprod!(nlp.inner, y_buf, u, vᵤ) - hvs .-= ρ .* y_buf - # w.r.t. ss - hvs .+= ρ .* vₛ - end - return -end - -function hessian_lagrangian_penalty!( - nlp::SlackEvaluator, H, x, y, σ, w, -) - n = n_variables(nlp) - @views begin - u = x[1:nlp.nv] - Hᵤᵤ = H[1:nlp.nv, 1:nlp.nv] - Hᵤᵥ = H[1:nlp.nv, 1+nlp.nv:n] - Hᵥᵤ = H[1+nlp.nv:n, 1:nlp.nv] - Hᵥᵥ = H[1+nlp.nv:n, 1+nlp.nv:n] - end - # w.r.t. uu - # ∇²L + ρ Jᵤ' * Jᵤ - # - # Passing a contiguous array for H is more appropriate - # than passing a non-contiguous view - hessian_lagrangian_penalty!(nlp.inner, nlp.H, u, y, σ, w) - copyto!(Hᵤᵤ, nlp.H) - - if !iszero(w) - D = Diagonal(w) - Jᵤ = nlp.J ; fill!(Jᵤ, 0.0) - jacobian!(nlp.inner, Jᵤ, u) - mul!(Hᵤᵥ, Jᵤ', -D) - mul!(Hᵥᵤ, - D, Jᵤ) - fill!(Hᵥᵥ, 0) - ind = diagind(Hᵥᵥ) # extract coefficients on the diagonal - Hᵥᵥ[ind] .= w - else - fill!(Hᵤᵥ, 0) - fill!(Hᵥᵤ, 0) - fill!(Hᵥᵥ, 0) - end -end - -# TODO: return sparse sparsity pattern for bottom-left block -function hessian_structure(nlp::SlackEvaluator) - n = n_variables(nlp) - rows = Int[r for r in 1:n for c in 1:r] - cols = Int[c for r in 1:n for c in 1:r] - return rows, cols -end - -function reset!(nlp::SlackEvaluator) - reset!(nlp.inner) -end - -# Utils function -function primal_infeasibility!(nlp::SlackEvaluator, cons, u) - constraint!(nlp, cons, u) # Evaluate constraints - return xnorm_inf(cons) -end -function primal_infeasibility(nlp::SlackEvaluator, u) - cons = similar(nlp.s_min) ; fill!(cons, 0) - return primal_infeasibility!(nlp, cons, u) -end - diff --git a/src/ExaPF.jl b/src/ExaPF.jl index 289432ed..a2aa875d 100644 --- a/src/ExaPF.jl +++ b/src/ExaPF.jl @@ -13,8 +13,6 @@ import CUDA.CUSOLVER import ForwardDiff using KernelAbstractions const KA = KernelAbstractions -import MathOptInterface -const MOI = MathOptInterface using TimerOutputs: @timeit, TimerOutput import Base: show, get @@ -27,6 +25,7 @@ const TIMER = TimerOutput() include("utils.jl") include("architectures.jl") + # Templates include("models.jl") @@ -44,7 +43,4 @@ const LS = LinearSolvers # Polar formulation include("Polar/polar.jl") -# Evaluators -include("Evaluators/Evaluators.jl") - end diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index abf6df81..d9725adf 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -122,6 
+122,10 @@ function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::AbstractArray, LinearAlgebra.ldiv!(y, s.factorization', x) # Forward-backward solve return 0 end +function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::Array, J::SparseMatrixCSC, x::Array) + LinearAlgebra.ldiv!(y, s.factorization', x) # Forward-backward solve + return 0 +end function ldiv!(::DirectSolver{Nothing}, y::Vector, J::AbstractMatrix, x::Vector) F = lu(J) @@ -153,6 +157,12 @@ function ldiv!(::DirectSolver{Nothing}, end get_transpose(::DirectSolver, M::CUSPARSE.CuSparseMatrixCSR) = CUSPARSE.CuSparseMatrixCSC(M) +function rdiv!(s::DirectSolver{<:LinearAlgebra.Factorization}, y::CUDA.CuVector, J::CUSPARSE.CuSparseMatrixCSR, x::CUDA.CuVector) + Jt = get_transpose(s, J) + csclsvqr!(Jt, x, y, 1e-8, one(Cint), 'O') + return 0 +end + function update_preconditioner!(solver::AbstractIterativeLinearSolver, J, device) update(solver.precond, J, device) end diff --git a/src/Polar/polar.jl b/src/Polar/polar.jl index e164d80d..05714d62 100644 --- a/src/Polar/polar.jl +++ b/src/Polar/polar.jl @@ -132,8 +132,6 @@ end # Convenient constructor PolarForm(datafile::String, device) = PolarForm(PS.PowerNetwork(datafile), device) -array_type(polar::PolarForm) = array_type(polar.device) - # Getters function get(polar::PolarForm, ::NumberOfState) npv = PS.get(polar.network, PS.NumberOfPVBuses()) @@ -273,6 +271,12 @@ function powerflow_jacobian(polar) return matpower_jacobian(polar, State(), power_balance, v0) end +function powerflow_jacobian_device(polar) + SpMT = default_sparse_matrix(polar.device) + J = powerflow_jacobian(polar) + return J |> SpMT +end + function Base.show(io::IO, polar::PolarForm) # Network characteristics nbus = PS.get(polar.network, PS.NumberOfBuses()) diff --git a/src/architectures.jl b/src/architectures.jl index aad19d04..341cd883 100644 --- a/src/architectures.jl +++ b/src/architectures.jl @@ -1,9 +1,6 @@ abstract type AbstractArchitecture end -array_type(::KA.CPU) = Array -array_type(::KA.GPU) = CUDA.CuArray - # norm xnorm(x::AbstractVector) = norm(x, 2) xnorm(x::CUDA.CuVector) = CUBLAS.nrm2(x) @@ -13,3 +10,5 @@ xzeros(S, n) = fill!(S(undef, n), zero(eltype(S))) xones(S, n) = fill!(S(undef, n), one(eltype(S))) xnorm_inf(a) = maximum(abs.(a)) + +default_sparse_matrix(::CPU) = SparseMatrixCSC diff --git a/test/Evaluators/MOI_wrapper.jl b/test/Evaluators/MOI_wrapper.jl deleted file mode 100644 index 27c0d85d..00000000 --- a/test/Evaluators/MOI_wrapper.jl +++ /dev/null @@ -1,55 +0,0 @@ - -using Ipopt -using MathOptInterface - -const MOI = MathOptInterface - -@testset "MOI wrapper" begin - CASE57_SOLUTION = [ - 1.0260825400262428, - 1.016218932360429, - 1.009467718490475, - 1.027857273911734, - 1.06, - 0.9962215167521776, - 0.9965415804936182, - 0.0, - 0.6, - 0.0, - 8.603379123083476, - 0.0, - 1.3982297290334753, - ] - - datafile = joinpath(INSTANCES_DIR, "case57.m") - nlp = ExaPF.ReducedSpaceEvaluator(datafile) - - @testset "ReducedSpaceEvaluator with L-BFGS" begin - optimizer = Ipopt.Optimizer() - MOI.set(optimizer, MOI.RawParameter("print_level"), 0) - MOI.set(optimizer, MOI.RawParameter("limited_memory_max_history"), 50) - MOI.set(optimizer, MOI.RawParameter("hessian_approximation"), "limited-memory") - MOI.set(optimizer, MOI.RawParameter("tol"), 1e-4) - - solution = ExaPF.optimize!(optimizer, nlp) - MOI.empty!(optimizer) - @test solution.status ∈ [MOI.OPTIMAL, MOI.LOCALLY_SOLVED] - @test solution.minimum ≈ 3.7589338e+04 - @test solution.minimizer ≈ CASE57_SOLUTION rtol=1e-5 - end - - # Test 
resolution with AugLagEvaluator and Ipopt, as used inside ProxAL - @testset "AugLagEvaluator with Hessian" begin - optimizer = Ipopt.Optimizer() - MOI.set(optimizer, MOI.RawParameter("print_level"), 0) - MOI.set(optimizer, MOI.RawParameter("tol"), 1e-4) - MOI.set(optimizer, MOI.RawParameter("max_iter"), 30) - aug = ExaPF.AugLagEvaluator(nlp, ExaPF.initial(nlp)) - - solution = ExaPF.optimize!(optimizer, aug) - MOI.empty!(optimizer) - @test solution.status ∈ [MOI.OPTIMAL, MOI.LOCALLY_SOLVED] - @test solution.minimum ≈ 25138.76310420395 - end -end - diff --git a/test/Evaluators/TestEvaluators.jl b/test/Evaluators/TestEvaluators.jl deleted file mode 100644 index c872f17b..00000000 --- a/test/Evaluators/TestEvaluators.jl +++ /dev/null @@ -1,95 +0,0 @@ -module TestEvaluators - -@eval Base.Experimental.@optlevel 0 - -using Test - -using FiniteDiff -using LinearAlgebra -using Random -using SparseArrays -using KernelAbstractions - -using ExaPF -import ExaPF: PowerSystem, LinearSolvers - -const PS = PowerSystem -const LS = LinearSolvers - -function myisless(a, b) - h_a = a |> Array - h_b = b |> Array - return h_a <= h_b -end - -function myisapprox(a, b; options...) - h_a = a |> Array - h_b = b |> Array - return isapprox(h_a, h_b; options...) -end - -include("powerflow.jl") -include("api.jl") -include("proxal_evaluator.jl") -include("auglag.jl") - -function _init(datafile, ::Type{ExaPF.ReducedSpaceEvaluator}, device) - constraints = Function[ - ExaPF.voltage_magnitude_constraints, - ExaPF.active_power_constraints, - ExaPF.reactive_power_constraints, - # ExaPF.flow_constraints, - ] - return ExaPF.ReducedSpaceEvaluator(datafile; device=device, constraints=constraints) -end -function _init(datafile, ::Type{ExaPF.ProxALEvaluator}, device) - nlp = ExaPF.ReducedSpaceEvaluator(datafile; device=device) - time = ExaPF.Normal - return ExaPF.ProxALEvaluator(nlp, time) -end -function _init(datafile, ::Type{ExaPF.SlackEvaluator}, device) - return ExaPF.SlackEvaluator(datafile; device=device) -end - -function runtests(datafile, device, AT) - @testset "Newton-Raphson resolution" begin - nlp = ExaPF.ReducedSpaceEvaluator(datafile; device=device, powerflow_solver=NewtonRaphson(tol=1e-6)) - test_powerflow_evaluator(nlp, device, AT) - end - @testset "$Evaluator Interface" for Evaluator in [ - ExaPF.ReducedSpaceEvaluator, - ExaPF.AugLagEvaluator, - ExaPF.ProxALEvaluator, - ExaPF.SlackEvaluator, - ExaPF.FeasibilityEvaluator, - ] - nlp = Evaluator(datafile; device=device) - test_evaluator_api(nlp, device, AT) - test_evaluator_callbacks(nlp, device, AT) - end - @testset "$Evaluator Hessian" for Evaluator in [ - ExaPF.ReducedSpaceEvaluator, - ExaPF.AugLagEvaluator, - ] - nlp = Evaluator(datafile; device=device) - test_evaluator_hessian(nlp, device, AT) - end - @testset "ReducedSpaceEvaluator BatchHessian" begin - nlp = ExaPF.ReducedSpaceEvaluator(datafile; device=device, nbatch_hessian=2) - test_evaluator_batch_hessian(nlp, device, AT) - end - @testset "ProxALEvaluator" begin - nlp = ExaPF.ReducedSpaceEvaluator(datafile; device=device) - test_proxal_evaluator(nlp, device, AT) - end - @testset "AugLagEvaluator with $Evaluator backend" for Evaluator in [ - ExaPF.ReducedSpaceEvaluator, - ExaPF.ProxALEvaluator, - ExaPF.SlackEvaluator, - ] - nlp = _init(datafile, Evaluator, device) - test_auglag_evaluator(nlp, device, AT) - end -end - -end diff --git a/test/Evaluators/api.jl b/test/Evaluators/api.jl deleted file mode 100644 index d48e2a6f..00000000 --- a/test/Evaluators/api.jl +++ /dev/null @@ -1,184 +0,0 @@ - -function 
test_evaluator_api(nlp, device, M) - # Test printing - println(devnull, nlp) - - n = ExaPF.n_variables(nlp) - m = ExaPF.n_constraints(nlp) - u = ExaPF.initial(nlp) - - u_min, u_max = ExaPF.bounds(nlp, ExaPF.Variables()) - g_min, g_max = ExaPF.bounds(nlp, ExaPF.Constraints()) - buffer = get(nlp, ExaPF.PhysicalState()) - - # Test consistence - @test n == length(u) - @test length(u_min) == length(u_max) == n - @test myisless(u_min, u_max) - @test length(g_min) == length(g_max) == m - if m > 0 - @test myisless(g_min, g_max) - end - - # Test API - @test isa(get(nlp, State()), AbstractVector) - @test isa(get(nlp, ExaPF.Constraints()), Array{Function}) - @test isa(get(nlp, State()), AbstractVector) - @test isa(buffer, ExaPF.AbstractBuffer) - @test ExaPF.constraints_type(nlp) in [:bound, :equality, :inequality] - - @test isa(ExaPF.has_hessian(nlp), Bool) - @test isa(ExaPF.has_hessian_lagrangian(nlp), Bool) - - # setters - nbus = get(nlp, PS.NumberOfBuses()) - loads = similar(u, nbus) ; fill!(loads, 1) - ExaPF.setvalues!(nlp, PS.ActiveLoad(), loads) - ExaPF.setvalues!(nlp, PS.ReactiveLoad(), loads) - - ExaPF.reset!(nlp) -end - -function test_evaluator_callbacks(nlp, device, M; rtol=1e-6) - # Wrap Evaluator to evaluate FiniteDiff on the CPU - # (finite_difference_gradient does not support `allowscalar(false)`) - bdg = ExaPF.BridgeDeviceEvaluator(nlp, ExaPF.CPU()) - - n = ExaPF.n_variables(nlp) - m = ExaPF.n_constraints(nlp) - u = ExaPF.initial(nlp) - - u_min, u_max = ExaPF.bounds(nlp, ExaPF.Variables()) - g_min, g_max = ExaPF.bounds(nlp, ExaPF.Constraints()) - - # 1/ update! function - conv = ExaPF.update!(nlp, u) - @test isa(conv, ExaPF.ConvergenceStatus) - @test conv.has_converged - - # 2/ objective function - c = ExaPF.objective(nlp, u) - @test isa(c, Real) - - # 3/ gradient! function - function reduced_cost(u_) - ExaPF.update!(bdg, u_) - return ExaPF.objective(bdg, u_) - end - g = similar(u) ; fill!(g, 0) - ExaPF.gradient!(nlp, g, u) - u0 = u |> Array - grad_fd = FiniteDiff.finite_difference_gradient(reduced_cost, u0) - @test myisapprox(grad_fd[:], g, rtol=1e-5) - - # Constraint - # 4/ Constraint - ## Evaluation of the constraints - if m > 0 - cons = similar(g_min) ; fill!(cons, 0) - ExaPF.constraint!(nlp, cons, u) - - ## Evaluation of the transpose-Jacobian product - jv = similar(u_min) ; fill!(jv, 0.0) - v = similar(g_min) ; fill!(v, 1.0) - h_v = v |> Array - h_cons = cons |> Array - ExaPF.jtprod!(nlp, jv, u, v) - function reduced_cons(u_) - ExaPF.update!(bdg, u_) - ExaPF.constraint!(bdg, h_cons, u_) - return dot(h_v, h_cons) - end - jv_fd = FiniteDiff.finite_difference_gradient(reduced_cons, u0) - - # TODO: rtol=1e-6 breaks on case30. Investigate why. 
- @test myisapprox(jv, jv_fd[:], rtol=1e-5) - - ## Evaluation of the Jacobian - J = ExaPF.jacobian(nlp, u) - # Test transpose Jacobian vector product - @test isapprox(jv, J' * v, rtol=rtol) - # Test Jacobian vector product - ExaPF.jprod!(nlp, v, u, jv) - @test isapprox(J * jv, v) - end - - ExaPF.reset!(nlp) -end - -function test_evaluator_hessian(nlp, device, M; rtol=1e-6) - n = ExaPF.n_variables(nlp) - @test ExaPF.has_hessian(nlp) - function reduced_cost(u_) - ExaPF.update!(nlp, u_) - return ExaPF.objective(nlp, u_) - end - u = ExaPF.initial(nlp) - ExaPF.update!(nlp, u) - ExaPF.gradient(nlp, u) # compute the gradient to update the adjoint internally - - # 1/ Hessian-vector product - hv = similar(u) ; fill!(hv, 0) - w = similar(u) ; fill!(w, 0) - h_w = zeros(n) ; h_w[1] = 1.0 - copyto!(w, h_w) - ExaPF.hessprod!(nlp, hv, u, w) - - # 2/ Full Hessian - H = similar(u, n, n) ; fill!(H, 0) - ExaPF.hessian!(nlp, H, u) - - # 3/ FiniteDiff - hess_fd = FiniteDiff.finite_difference_hessian(reduced_cost, u) - - @test H * w == hv - @test H ≈ hess_fd.data rtol=rtol -end - -function test_evaluator_batch_hessian(nlp, device, M; rtol=1e-5) - n = ExaPF.n_variables(nlp) - nbatch = ExaPF.number_batches_hessian(nlp) - @test ExaPF.has_hessian(nlp) - @test nbatch > 1 - function reduced_cost(u_) - ExaPF.update!(nlp, u_) - return ExaPF.objective(nlp, u_) - end - - u = ExaPF.initial(nlp) - n = length(u) - ExaPF.update!(nlp, u) - g = ExaPF.gradient(nlp, u) # compute the gradient to update the adjoint internally - - # 0/ Update Hessian object - # 1/ Hessian-vector product - hv = similar(u, n, nbatch) ; fill!(hv, 0) - w = similar(u, n, nbatch) ; fill!(w, 0) - w[1, :] .= 1.0 - ExaPF.hessprod!(nlp, hv, u, w) - - # 2/ Full Hessian - H = similar(u, n, n) ; fill!(H, 0) - ExaPF.hessian!(nlp, H, u) - - # 3/ FiniteDiff - hess_fd = FiniteDiff.finite_difference_hessian(reduced_cost, u) - - @test H * w ≈ hv - @test H ≈ hess_fd.data rtol=rtol - - m = ExaPF.n_constraints(nlp) - if m > 0 - J = similar(u, m, n) - ExaPF.jacobian!(nlp, J, u) - function reduced_cons(u_) - cons = similar(u_, m) - ExaPF.update!(nlp, u_) - ExaPF.constraint!(nlp, cons, u_) - return cons - end - J_fd = FiniteDiff.finite_difference_jacobian(reduced_cons, u) - @test J ≈ J_fd rtol=rtol - end -end - diff --git a/test/Evaluators/auglag.jl b/test/Evaluators/auglag.jl deleted file mode 100644 index 13e0ea66..00000000 --- a/test/Evaluators/auglag.jl +++ /dev/null @@ -1,72 +0,0 @@ -function test_auglag_evaluator(nlp, device, MT) - u0 = ExaPF.initial(nlp) - w♭, w♯ = ExaPF.bounds(nlp, ExaPF.Variables()) - # Build penalty evaluator - @testset "Scaling $scaling" for scaling in [true, false] - ExaPF.reset!(nlp) - pen = ExaPF.AugLagEvaluator(nlp, u0; scale=scaling) - bgd = ExaPF.BridgeDeviceEvaluator(pen, CPU()) - u = w♭ - # Update nlp to stay on manifold - ExaPF.update!(pen, u) - # Compute objective - c = ExaPF.objective(pen, u) - c_ref = ExaPF.inner_objective(pen, u) - @test isa(c, Real) - @test c >= c_ref - inf_pr2 = ExaPF.primal_infeasibility(pen, u) - @test inf_pr2 >= 0.0 - - ################################################## - # Update penalty weigth - # (with a large-enough factor to have a meaningful derivative check) - ################################################## - ExaPF.update_penalty!(pen, η=1e3) - ExaPF.update_multipliers!(pen) - - ################################################## - # Callbacks - ################################################## - ExaPF.update!(pen, u) - obj = ExaPF.objective(pen, u) - g = ExaPF.gradient(pen, u) - # Compare with finite 
differences - function reduced_cost(u_) - ExaPF.update!(bgd, u_) - return ExaPF.objective(bgd, u_) - end - grad_fd = FiniteDiff.finite_difference_gradient(reduced_cost, u |> Array) - @test myisapprox(grad_fd, g, rtol=1e-5) - - # Test Hessian only on ReducedSpaceEvaluator and SlackEvaluator - if ( - isa(nlp, ExaPF.ReducedSpaceEvaluator) || - isa(nlp, ExaPF.SlackEvaluator) - ) - n = length(u) - ExaPF.update!(pen, u) - hv = similar(u) ; fill!(hv, 0) - w = similar(u) - h_w = zeros(n) ; h_w[1] = 1.0 - copyto!(w, h_w) - - ExaPF.hessprod!(pen, hv, u, w) - H = similar(u, n, n) ; fill!(H, 0) - ExaPF.hessian!(pen, H, u) - # Is Hessian vector product relevant? - @test H * w ≈ hv - # Is Hessian correct? - hess_fd = FiniteDiff.finite_difference_hessian(reduced_cost, u) - - h_H = H |> Array - h_H_fd = hess_fd.data |> Array - - @test isapprox(h_H, h_H_fd, rtol=1e-5) - end - # Test estimation of multipliers (only on SlackEvaluator) - if isa(nlp, ExaPF.SlackEvaluator) && isa(device, CPU) - λ = ExaPF.estimate_multipliers(pen, u) - end - end -end - diff --git a/test/Evaluators/interface.jl b/test/Evaluators/interface.jl deleted file mode 100644 index f32d03fa..00000000 --- a/test/Evaluators/interface.jl +++ /dev/null @@ -1,96 +0,0 @@ -# Check that interfaces of Evaluators are well implemented -@testset "API of evaluator $Evaluator" for Evaluator in [ - ExaPF.ReducedSpaceEvaluator, - ExaPF.AugLagEvaluator, - ExaPF.ProxALEvaluator, - ExaPF.SlackEvaluator, - ExaPF.FeasibilityEvaluator, -] - datafile = joinpath(INSTANCES_DIR, "case9.m") - # Default constructor: should take as input path to instance - nlp = Evaluator(datafile) - - # Test printing - println(devnull, nlp) - - # Test consistence - n = ExaPF.n_variables(nlp) - m = ExaPF.n_constraints(nlp) - u = ExaPF.initial(nlp) - - u_min, u_max = ExaPF.bounds(nlp, ExaPF.Variables()) - g_min, g_max = ExaPF.bounds(nlp, ExaPF.Constraints()) - buffer = get(nlp, ExaPF.PhysicalState()) - - @testset "Evaluator's API" begin - @test n == length(u) - @test length(u_min) == length(u_max) == n - @test u_min <= u_max - @test length(g_min) == length(g_max) == m - if m > 0 - @test g_min <= g_max - end - - @test isa(get(nlp, ExaPF.Constraints()), Array{Function}) - @test isa(get(nlp, State()), AbstractVector) - @test isa(buffer, ExaPF.AbstractBuffer) - @test ExaPF.constraints_type(nlp) in [:bound, :equality, :inequality] - - @test isa(ExaPF.has_hessian(nlp), Bool) - @test isa(ExaPF.has_hessian_lagrangian(nlp), Bool) - - # setters - nbus = get(nlp, PS.NumberOfBuses()) - loads = similar(u, nbus) ; fill!(loads, 1) - ExaPF.setvalues!(nlp, PS.ActiveLoad(), loads) - ExaPF.setvalues!(nlp, PS.ReactiveLoad(), loads) - - ExaPF.reset!(nlp) - end - - @testset "Evaluator's callbacks" begin - # 1/ update! function - conv = ExaPF.update!(nlp, u) - @test isa(conv, ExaPF.ConvergenceStatus) - @test conv.has_converged - - # 2/ objective function - c = ExaPF.objective(nlp, u) - @test isa(c, Real) - - # 3/ gradient! 
function - function reduced_cost(u_) - ExaPF.update!(nlp, u_) - return ExaPF.objective(nlp, u_) - end - g = similar(u) ; fill!(g, 0) - ExaPF.gradient!(nlp, g, u) - grad_fd = FiniteDiff.finite_difference_gradient(reduced_cost, u) - @test isapprox(g, grad_fd, rtol=1e-5) - - # 4/ Constraint - ## Evaluation of the constraints - if m > 0 - cons = similar(g_min) ; fill!(cons, 0.0) - ExaPF.constraint!(nlp, cons, u) - - # Vector product - jv = similar(u_min) ; fill!(jv, 0.0) - v = similar(g_min) ; fill!(v, 1.0) - ExaPF.jtprod!(nlp, jv, u, v) - function reduced_cons(u_) - ExaPF.update!(nlp, u_) - ExaPF.constraint!(nlp, cons, u_) - return dot(v, cons) - end - jv_fd = FiniteDiff.finite_difference_gradient(reduced_cons, u) - - @test isapprox(jv, jv_fd, rtol=1e-5) - - # Jacobian - J = ExaPF.jacobian(nlp, u) - @test J' * v ≈ jv - end - end -end - diff --git a/test/Evaluators/powerflow.jl b/test/Evaluators/powerflow.jl deleted file mode 100644 index cf5003b0..00000000 --- a/test/Evaluators/powerflow.jl +++ /dev/null @@ -1,21 +0,0 @@ -function test_powerflow_evaluator(nlp, device, AT) - # Parameters - npartitions = 8 - - # Get reduced space Jacobian - J = ExaPF.adjoint_jacobian(nlp, State()) - n = size(J, 1) - # Build preconditioner - precond = LinearSolvers.BlockJacobiPreconditioner(J, npartitions, device) - # Retrieve initial state of network - uk = ExaPF.initial(nlp) - - @testset "Powerflow solver $(LinSolver)" for LinSolver in ExaPF.list_solvers(device) - algo = LinSolver(J; P=precond) - nlp.linear_solver = algo - convergence = ExaPF.update!(nlp, uk) - @test convergence.has_converged - @test convergence.norm_residuals < nlp.powerflow_solver.tol - ExaPF.reset!(nlp) - end -end diff --git a/test/Evaluators/proxal_evaluator.jl b/test/Evaluators/proxal_evaluator.jl deleted file mode 100644 index 24305b9e..00000000 --- a/test/Evaluators/proxal_evaluator.jl +++ /dev/null @@ -1,84 +0,0 @@ - -function test_proxal_evaluator(nlp, device, MT) - u0 = ExaPF.initial(nlp) - @testset "ProxALEvaluators ($time)" for time in [ExaPF.Origin, ExaPF.Normal, ExaPF.Final] - # Build ProxAL evaluator - prox = ExaPF.ProxALEvaluator(nlp, time) - # Wrapper for FiniteDiff - bgd = ExaPF.BridgeDeviceEvaluator(prox, ExaPF.CPU()) - - n = ExaPF.n_variables(prox) - w = ExaPF.initial(prox) - @test length(w) == n - - # Update nlp to stay on manifold - conv = ExaPF.update!(prox, w) - @test conv.has_converged - - # Compute objective - c = ExaPF.objective(prox, w) - - @testset "Gradient & Hessian" begin - g = similar(w) ; fill!(g, 0) - ExaPF.gradient!(prox, g, w) - - # Test evaluation of gradient with Finite Differences - function reduced_cost(w_) - ExaPF.update!(bgd, w_) - return ExaPF.objective(bgd, w_) - end - grad_fd = FiniteDiff.finite_difference_gradient(reduced_cost, w |> Array) - @test myisapprox(grad_fd[:], g, rtol=1e-6) - - # Test gradient with non-trivial penalties - λf = 0.5 .* rand(prox.ng) - λt = 1.5 .* rand(prox.ng) - pgf = rand(prox.ng) - ExaPF.update_primal!(prox, ExaPF.Previous(), pgf) - ExaPF.update_multipliers!(prox, ExaPF.Next(), λt) - ExaPF.update_multipliers!(prox, ExaPF.Current(), λf) - - ExaPF.update!(prox, w) - fill!(g, 0) - ExaPF.gradient!(prox, g, w) - grad_fd = FiniteDiff.finite_difference_gradient(reduced_cost, w |> Array) - @test myisapprox(grad_fd[:], g, rtol=1e-6) - - hv = similar(w) ; fill!(hv, 0) - tgt = similar(w) ; fill!(tgt, 0) - tgt[1:1] .= 1.0 - ExaPF.hessprod!(prox, hv, w, tgt) - H = ExaPF.hessian(prox, w) - - hess_fd = FiniteDiff.finite_difference_hessian(reduced_cost, w) - # Take attribute data as 
hess_fd is of type Symmetric - @test_broken H ≈ hess_fd.data rtol=1e-6 - end - - @testset "Constraints" begin - m_I = ExaPF.n_constraints(prox) - cons = similar(w, m_I) ; fill!(cons, 0) - # Evaluate constraints - ExaPF.constraint!(prox, cons, w) - # Transpose Jacobian vector product - v = similar(w, m_I) ; fill!(v, 0) - jv = similar(w, n) ; fill!(jv, 0) - ExaPF.jtprod!(prox, jv, w, v) - - # Jacobian structure - if isa(device, CPU) - rows, cols = ExaPF.jacobian_structure(prox) - # Evaluation - jac = ExaPF.jacobian(prox, w) - # Check correctness of transpose Jacobian vector product - @test jv == jac' * v - - # Jacobian vector product - ExaPF.jprod!(prox, v, w, jv) - @test v == jac * jv - end - end - - ExaPF.reset!(prox) - end -end diff --git a/test/Evaluators/test_rgm.jl b/test/Evaluators/test_rgm.jl deleted file mode 100644 index e7bcca4a..00000000 --- a/test/Evaluators/test_rgm.jl +++ /dev/null @@ -1,47 +0,0 @@ -# Verify solutions against matpower results -using Test -using ExaPF -using FiniteDiff -using ForwardDiff -using LinearAlgebra -using KernelAbstractions - -@testset "RGM Optimal Power flow 9 bus case" begin - datafile = joinpath(INSTANCES_DIR, "case9.m") - - nlp = ExaPF.ReducedSpaceEvaluator(datafile) - uk = ExaPF.initial(nlp) - - # solve power flow - ExaPF.update!(nlp, uk) - - # reduced gradient method - iterations = 0 - iter_max = 100 - step = 0.0001 - norm_grad = 10000 - norm_tol = 1e-5 - - iter = 1 - - # initial gradient - grad = similar(uk) - wk = similar(uk) - up = copy(uk) - fill!(grad, 0) - - while norm_grad > norm_tol && iter < iter_max - ExaPF.update!(nlp, uk) - c = ExaPF.objective(nlp, uk) - ExaPF.gradient!(nlp, grad, uk) - # compute control step - wk = uk - step*grad - ExaPF.project!(uk, wk, nlp.u_min, nlp.u_max) - norm_grad = norm(uk .- up, Inf) - iter += 1 - up .= uk - end - @test iter == 39 - @test isapprox(uk, [1.1, 1.1, 1.1, 1.343109921105559, 0.9421135274454701], atol=1e-4) -end - diff --git a/test/Polar/TestPolarForm.jl b/test/Polar/TestPolarForm.jl index 281720b9..1a3e848d 100644 --- a/test/Polar/TestPolarForm.jl +++ b/test/Polar/TestPolarForm.jl @@ -12,9 +12,10 @@ using Random using SparseArrays using ExaPF -import ExaPF: PowerSystem, AutoDiff +import ExaPF: PowerSystem, AutoDiff, LinearSolvers const PS = PowerSystem +const LS = LinearSolvers include("api.jl") include("autodiff.jl") diff --git a/test/Polar/api.jl b/test/Polar/api.jl index d0df8bc4..44949e62 100644 --- a/test/Polar/api.jl +++ b/test/Polar/api.jl @@ -119,3 +119,25 @@ function test_polar_constraints(polar, device, M) return nothing end +function test_polar_powerflow(polar, device, M) + pf_solver = NewtonRaphson(tol=1e-6) + npartitions = 8 + # Get reduced space Jacobian on the CPU + J = ExaPF.powerflow_jacobian(polar) + n = size(J, 1) + # Build preconditioner + precond = LS.BlockJacobiPreconditioner(J, npartitions, device) + + # Init AD + jx = AutoDiff.Jacobian(polar, ExaPF.power_balance, State()) + # Init buffer + buffer = get(polar, ExaPF.PhysicalState()) + + @testset "Powerflow solver $(LinSolver)" for LinSolver in ExaPF.list_solvers(device) + algo = LinSolver(J; P=precond) + ExaPF.init_buffer!(polar, buffer) + convergence = ExaPF.powerflow(polar, jx, buffer, pf_solver; linear_solver=algo) + @test convergence.has_converged + @test convergence.norm_residuals < pf_solver.tol + end +end diff --git a/test/cusolver.jl b/test/cusolver.jl deleted file mode 100644 index 921d9fdc..00000000 --- a/test/cusolver.jl +++ /dev/null @@ -1,28 +0,0 @@ - -using CUDAKernels -using BlockPowerFlow - -using 
CUDA.CUSPARSE -import ExaPF: LinearSolvers -import BlockPowerFlow: CUSOLVERRF - -const LS = LinearSolvers -const CUDA_ARCH = (CUDADevice(), CuArray, CuSparseMatrixCSR) - -# Overload factorization routine to use cusolverRF -LS.exa_factorize(J::CuSparseMatrixCSR) = CUSOLVERRF.CusolverRfLU(J) -LS.exa_factorize(J::CuSparseMatrixCSC) = CUSOLVERRF.CusolverRfLU(J) - -# Overload factorization for batch Hessian computation -function ExaPF._batch_hessian_factorization(J::CuSparseMatrixCSR, nbatch) - Jtrans = CUSPARSE.CuSparseMatrixCSC(J) - if nbatch == 1 - lufac = CUSOLVERRF.CusolverRfLU(J) - lufact = CUSOLVERRF.CusolverRfLU(Jtrans) - else - lufac = CUSOLVERRF.CusolverRfLUBatch(J, nbatch) - lufact = CUSOLVERRF.CusolverRfLUBatch(Jtrans, nbatch) - end - return (lufac, lufact) -end - diff --git a/test/runtests.jl b/test/runtests.jl index 03ed6638..bbc5557c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -18,14 +18,15 @@ const CASES = ["case9.m", "case30.m"] ARCHS = Any[(CPU(), Array, SparseMatrixCSC)] if has_cuda_gpu() - include("cusolver.jl") + using CUDAKernels + using CUDA.CUSPARSE + CUDA_ARCH = (CUDADevice(), CuArray, CuSparseMatrixCSR) push!(ARCHS, CUDA_ARCH) end # Load test modules @isdefined(TestLinearSolvers) || include("TestLinearSolvers.jl") @isdefined(TestPolarFormulation) || include("Polar/TestPolarForm.jl") -@isdefined(TestEvaluators) || include("Evaluators/TestEvaluators.jl") init_time = time() @testset "Test ExaPF" begin @@ -59,25 +60,9 @@ init_time = time() TestPolarFormulation.runtests(datafile, device, AT) end println("Took $(round(time() - tic; digits=1)) seconds.") - - println("Test Evaluators ...") - tic = time() - @testset "ExaPF.Evaluator $(case)" for case in CASES - datafile = joinpath(INSTANCES_DIR, case) - TestEvaluators.runtests(datafile, device, AT) - end - println("Took $(round(time() - tic; digits=1)) seconds.") end println() - @testset "Test reduced gradient algorithms" begin - @info "Test reduced gradient algorithm ..." - tic = time() - include("Evaluators/test_rgm.jl") - include("Evaluators/MOI_wrapper.jl") - println("Took $(round(time() - tic; digits=1)) seconds.\n") - end - @testset "Test Documentation" begin include("quickstart.jl") end
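
With the Evaluators layer removed, the power flow entry point is the `PolarForm` API exercised by the new `test_polar_powerflow` helper added in `test/Polar/api.jl` above. The following is a minimal CPU sketch of that same workflow, not part of the patch itself; the case-file path and the choice of the first solver returned by `ExaPF.list_solvers` are illustrative assumptions, and all calls mirror the signatures used in the test code above.

```julia
using ExaPF
import ExaPF: AutoDiff, LinearSolvers
const LS = LinearSolvers

datafile = "case9.m"                      # hypothetical path to a MATPOWER instance
polar = ExaPF.PolarForm(datafile, ExaPF.CPU())

# Host-side power flow Jacobian and a block-Jacobi preconditioner (8 partitions)
J = ExaPF.powerflow_jacobian(polar)
precond = LS.BlockJacobiPreconditioner(J, 8, ExaPF.CPU())

# AutoDiff Jacobian of the power balance w.r.t. the state, plus the physical state buffer
jx = AutoDiff.Jacobian(polar, ExaPF.power_balance, State())
buffer = get(polar, ExaPF.PhysicalState())
ExaPF.init_buffer!(polar, buffer)

# Pick a linear solver available on the device and run Newton-Raphson
LinSolver = first(ExaPF.list_solvers(ExaPF.CPU()))
algo = LinSolver(J; P=precond)
convergence = ExaPF.powerflow(polar, jx, buffer, NewtonRaphson(tol=1e-6); linear_solver=algo)
@assert convergence.has_converged
```

On a CUDA GPU the same pattern should apply with the `(CUDADevice(), CuArray, CuSparseMatrixCSR)` architecture tuple that `test/runtests.jl` now builds inline instead of including `test/cusolver.jl`.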