Merge pull request #113 from chrhansk/feature-qp-tests
Add QP test instances
chrhansk authored Aug 26, 2024
2 parents 161200a + eada56c commit e044811
Showing 10 changed files with 255 additions and 14 deletions.
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -14,7 +14,7 @@
project = "pygradflow"
copyright = "2023, Christoph Hansknecht"
author = "Christoph Hansknecht"
release = "0.5.18"
release = "0.5.19"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
23 changes: 23 additions & 0 deletions pygradflow/implicit_func.py
@@ -100,6 +100,21 @@ def apply_project_deriv(self, mat: sp.sparse.spmatrix, active_set: np.ndarray):


class ImplicitFunc(StepFunc):
"""
Standard residual function for implicit Euler steps
with the value
.. math::
F(x, y; \\hat{x}, \\hat{y}) =
\\begin{pmatrix}
x - P_{C} (\\hat{x} - \\Delta t \\nabla_{x} \\mathcal{L}^{\\rho}(x, y)) \\\\
y - (\\hat{y} + \\Delta t \\nabla_{y} \\mathcal{L}^{\\rho}(x, y)
\\end{pmatrix}
The values :math:`\\hat{x}` and :math:`\\hat{y}` are obtained
from the iteratre provided in the constructor
"""

def __init__(self, problem: Problem, iterate: Iterate, dt: float) -> None:
super().__init__(problem, iterate, dt)

@@ -185,6 +200,14 @@ def deriv_at(


class ScaledImplicitFunc(StepFunc):
"""
The *scaled* residual function for implicit Euler steps
is defined as the normal residual function
given by :py:class:`pygradflow.implicit_func.ImplicitFunc`
scaled by a factor
of :math:`\\lambda = 1 / \\Delta t`.
"""

def __init__(self, problem: Problem, iterate: Iterate, dt: float) -> None:
super().__init__(problem, iterate, dt)
self.lamb = 1.0 / dt
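For reference, a minimal NumPy sketch of the residual defined in the two docstrings above, assuming the set C is a simple box so the projection P_C reduces to a componentwise clip. The function names and signatures are illustrative only, not part of the pygradflow API:

import numpy as np


def proj(z, lb, ub):
    # Euclidean projection onto the box C = [lb, ub]
    return np.clip(z, lb, ub)


def implicit_residual(x, y, x_hat, y_hat, grad_x_lag, grad_y_lag, dt, lb, ub):
    # F(x, y; x_hat, y_hat) as in the ImplicitFunc docstring
    r_x = x - proj(x_hat - dt * grad_x_lag(x, y), lb, ub)
    r_y = y - (y_hat + dt * grad_y_lag(x, y))
    return np.concatenate([r_x, r_y])


def scaled_implicit_residual(x, y, x_hat, y_hat, grad_x_lag, grad_y_lag, dt, lb, ub):
    # ScaledImplicitFunc: the same residual scaled by lambda = 1 / dt
    return implicit_residual(x, y, x_hat, y_hat, grad_x_lag, grad_y_lag, dt, lb, ub) / dt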
13 changes: 11 additions & 2 deletions pygradflow/newton.py
@@ -200,11 +200,17 @@ def __init__(

self.step_solver = step_solver
self.step_solver.update_derivs(orig_iterate)
self._curr_active_set = None

def step(self, iterate):
active_set = self.func.compute_active_set(iterate, self.rho, self.tau)

self.step_solver.update_active_set(active_set)
if self._curr_active_set is None:
self.step_solver.update_active_set(active_set)
elif (self._curr_active_set != active_set).any():
self.step_solver.update_active_set(active_set)

self._curr_active_set = active_set

return self.step_solver.solve(iterate)

@@ -247,6 +253,9 @@ def step(self, iterate):
func_value = self.func.value_at(iterate, self.rho)
res_value = 0.5 * np.dot(func_value, func_value)

if res_value <= params.newton_tol:
return step_result

# TODO: Create a method in the step function
# to compute a forward product instead to make
# everything more efficient
@@ -271,7 +280,7 @@
next_func_value = self.func.value_at(next_iterate, self.rho)
next_res_value = 0.5 * np.dot(next_func_value, next_func_value)

if np.isclose(next_res_value, 0.0):
if next_res_value <= params.newton_tol:
break

if next_res_value <= res_value + (1e-4 * alpha * inner_product):
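The two changes above save redundant work: the step solver is only notified of a new active set when it differs from the cached one, and the Newton loop now terminates once the residual drops below params.newton_tol instead of testing against an absolute zero. A standalone sketch of the caching pattern, where step_solver stands in for any object with an update_active_set method (not the library's actual class):

import numpy as np


class ActiveSetCache:
    # Forward a new active set to an expensive consumer (e.g. a step
    # solver that refactorizes a matrix) only when the set has changed.
    def __init__(self, step_solver):
        self.step_solver = step_solver
        self._curr_active_set = None  # nothing seen yet

    def update(self, active_set: np.ndarray) -> None:
        changed = (
            self._curr_active_set is None
            or (self._curr_active_set != active_set).any()
        )
        if changed:
            self.step_solver.update_active_set(active_set)
        self._curr_active_set = active_set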
7 changes: 4 additions & 3 deletions pygradflow/result.py
@@ -23,7 +23,8 @@ def __init__(
dist_factor: float,
**attrs
):
self.problem = problem
self.num_vars = problem.num_vars
self.num_cons = problem.num_cons
self._attrs = attrs

self._x = x
@@ -39,8 +40,8 @@ def _set_path(self, path, model_times):
self._attrs["path"] = path
self._attrs["model_times"] = model_times

num_vars = self.problem.num_vars
num_cons = self.problem.num_cons
num_vars = self.num_vars
num_cons = self.num_cons

assert model_times.ndim == 1
assert path.shape == (num_vars + num_cons, len(model_times))
3 changes: 3 additions & 0 deletions pygradflow/runners/instance.py
@@ -9,6 +9,9 @@ def __init__(self, name, num_vars, num_cons):
self.num_vars = num_vars
self.num_cons = num_cons

def __repr__(self):
return f"{self.__class__.__name__}({self.name})"

@property
def size(self):
return self.num_vars + self.num_cons
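A quick illustration of the new __repr__ together with the existing size property. The class name Instance is assumed from the file name (the diff only shows the constructor), and the instance data is made up:

from pygradflow.runners.instance import Instance

inst = Instance("qp_small", num_vars=4, num_cons=2)
print(repr(inst))  # Instance(qp_small)
print(inst.size)   # 6, i.e. num_vars + num_cons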
26 changes: 23 additions & 3 deletions pygradflow/step/box_control.py
@@ -216,9 +216,10 @@ def solve_step_ipopt(self, iterate, rho, dt, timer):
return reduced_problem.solve(timer=timer)

def solve_step_box(self, iterate, rho, dt, timer):
from .box_solver import BoxSolverError, solve_box_constrained
from .box_solver import BoxSolverError, BoxSolverStatus, solve_box_constrained

problem = self.problem
params = self.params
lamb = 1.0 / dt

def objective(x):
@@ -231,9 +232,28 @@ def hessian(x):
return self.hessian(iterate, x, lamb, rho)

try:
return solve_box_constrained(
iterate.x, objective, gradient, hessian, problem.var_lb, problem.var_ub
result = solve_box_constrained(
iterate.x,
objective,
gradient,
hessian,
problem.var_lb,
problem.var_ub,
obj_lower=params.obj_lower_limit,
)

if result.success:
return result.x

elif result.status == BoxSolverStatus.Unbounded:
return result.x
else:
raise StepSolverError(
"Box-constrained solver failed to converge (status = {})".format(
result.status
)
)

except BoxSolverError as e:
raise StepSolverError("Box-constrained solver failed to converge") from e

34 changes: 30 additions & 4 deletions pygradflow/step/box_solver.py
@@ -1,3 +1,4 @@
from enum import Enum, auto
from typing import Callable

import numpy as np
@@ -10,6 +11,22 @@ class BoxSolverError(Exception):
pass


class BoxSolverStatus(Enum):
Optimal = auto()
Unbounded = auto()
IterationLimit = auto()


class BoxSolverResult:
def __init__(self, x, status, iterations):
self.x = x
self.status = status
self.iterations = iterations

@property
def success(self):
return self.status == BoxSolverStatus.Optimal


# Based on "Projected Newton Methods for Optimization Problems with Simple Constraints"
def solve_box_constrained(
x0: np.ndarray,
@@ -18,10 +35,11 @@ def solve_box_constrained(
hess: Callable[[np.ndarray], sp.sparse.spmatrix],
lb: np.ndarray,
ub: np.ndarray,
obj_lower: float,
max_it=1000,
atol: float = 1e-8,
rtol: float = 1e-8,
):
atol: float = 1e-6,
rtol: float = 1e-6,
) -> BoxSolverResult:

(n,) = x0.shape
assert lb.shape == (n,)
@@ -32,10 +50,16 @@
beta = 0.5
sigma = 1e-3

status = BoxSolverStatus.IterationLimit

for iteration in range(max_it):
curr_func = func(curr_x)
curr_grad = grad(curr_x)

if curr_func <= obj_lower:
status = BoxSolverStatus.Unbounded
break

assert curr_grad.shape == (n,)

at_lower = np.isclose(curr_x, lb)
@@ -52,9 +76,11 @@
grad_norm = np.linalg.norm(curr_grad, ord=np.inf)

if grad_norm < atol:
status = BoxSolverStatus.Optimal
break

if (residuum < atol) or (residuum / grad_norm) < rtol:
status = BoxSolverStatus.Optimal
break

active = np.logical_or(active_lower, active_upper)
@@ -105,4 +131,4 @@ def solve_box_constrained(
else:
raise BoxSolverError(f"Did not converge after {max_it} iterations")

return curr_x
return BoxSolverResult(curr_x, status, iteration)
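A usage sketch based on the signature above: minimize a small strictly convex quadratic over the unit box. Passing obj_lower=-np.inf disables the new unboundedness check; the import path is assumed from the file location, and the printed result is what the projected Newton iteration should converge to:

import numpy as np
import scipy.sparse as sp

from pygradflow.step.box_solver import solve_box_constrained

Q = sp.diags([2.0, 4.0])  # positive definite Hessian
c = np.array([-2.0, -8.0])


def func(x):
    return 0.5 * x @ (Q @ x) + c @ x


def grad(x):
    return Q @ x + c


def hess(x):
    return sp.csc_matrix(Q)


lb = np.zeros(2)
ub = np.ones(2)

result = solve_box_constrained(np.zeros(2), func, grad, hess, lb, ub, obj_lower=-np.inf)
print(result.success, result.x)  # expected: True [1. 1.]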
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "pygradflow"
version = "0.5.18"
version = "0.5.19"
description = "PyGradFlow is a simple implementation of the sequential homotopy method to be used to solve general nonlinear programs."
authors = ["Christoph Hansknecht <[email protected]>"]
readme = "README.md"
30 changes: 30 additions & 0 deletions tests/pygradflow/qp.py
@@ -0,0 +1,30 @@
from pygradflow.problem import Problem


class QP(Problem):
def __init__(self, hess, jac, grad, res, lb, ub):
self.m, self.n = jac.shape
assert hess.shape[0] == self.n
assert hess.shape[1] == self.n
assert grad.shape[0] == self.n
assert res.shape[0] == self.m
super().__init__(lb, ub, num_cons=self.m)
self.A = hess
self.B = jac
self.a = grad
self.b = res

def obj(self, x):
return 0.5 * x.T @ self.A @ x + self.a.T @ x

def obj_grad(self, x):
return self.A @ x + self.a

def cons(self, x):
return self.B @ x + self.b

def cons_jac(self, x):
return self.B

def lag_hess(self, x, _):
return self.A
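A small sanity check for the QP class above. The matrices are illustrative test data, and the import path assumes the tests are run from the repository root:

import numpy as np

from tests.pygradflow.qp import QP

hess = 2.0 * np.eye(2)
jac = np.array([[1.0, 1.0]])  # a single linear constraint
grad = np.array([-1.0, -1.0])
res = np.array([-1.0])  # cons(x) = x1 + x2 - 1
lb = np.full(2, -np.inf)
ub = np.full(2, np.inf)

qp = QP(hess, jac, grad, res, lb, ub)

x = np.array([0.5, 0.5])
assert np.isclose(qp.obj(x), 0.5 * x @ hess @ x + grad @ x)
assert np.allclose(qp.cons(x), 0.0)  # x satisfies the constraint
assert np.allclose(qp.cons_jac(x), jac)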