Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Solve mathematical programs in parallel #21957

Merged
Show file tree
Hide file tree
Changes from 35 commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
888c8d7
wip
AlexandreAmice Sep 25, 2024
eb2434f
finalize interface
AlexandreAmice Sep 26, 2024
6a4ae34
remove useless bazel bloat
AlexandreAmice Sep 26, 2024
3419d94
bind python
AlexandreAmice Oct 3, 2024
3a472b8
bazel lint fix
AlexandreAmice Oct 4, 2024
79c7263
Merge remote-tracking branch 'upstream' into solve_mathematical_progr…
jwnimmer-tri Oct 4, 2024
37477a8
switch test to use ipopt instead of (absent) snopt
jwnimmer-tri Oct 4, 2024
3d8bfc3
small style cleanups in entry points
jwnimmer-tri Oct 4, 2024
69da36e
clean up par-for dispatch some related comments
jwnimmer-tri Oct 4, 2024
6096c53
rewrite and clarify the implementation
jwnimmer-tri Oct 4, 2024
88c9a28
failing the mixed none test
AlexandreAmice Oct 4, 2024
be93c63
fixed bindings
AlexandreAmice Oct 9, 2024
89565e7
bound feasible region of IPOPT to avoid console spam
AlexandreAmice Oct 15, 2024
967322c
fix ipopt solver issue
AlexandreAmice Oct 15, 2024
02b4c4b
add parallelism test to all solvers
AlexandreAmice Oct 18, 2024
a7a295d
ensure that solvers use threads
AlexandreAmice Oct 18, 2024
6a3857d
switch to warn once
AlexandreAmice Oct 18, 2024
7163020
add initial guess to help sonoma along
AlexandreAmice Oct 18, 2024
7803b7c
remove ipopt test for thread safe linear solvers
AlexandreAmice Oct 18, 2024
637feb1
flag ipopt as not threadsafe
AlexandreAmice Oct 18, 2024
aa1499c
Merge remote-tracking branch 'upstream' into solve_mathematical_progr…
jwnimmer-tri Oct 20, 2024
b4af933
Merge remote-tracking branch 'AlexandreAmice/solve_mathematical_progr…
jwnimmer-tri Oct 20, 2024
6a8cef3
fix ipopt class comment
jwnimmer-tri Oct 20, 2024
d0ee7e7
Remove linear-solver logic for ipopt
jwnimmer-tri Oct 20, 2024
c72361c
Move solve_in_parallel test to a dedicated file
jwnimmer-tri Oct 20, 2024
a022df0
move all SolverInParallell tests into one place
jwnimmer-tri Oct 20, 2024
bcb82f5
De-duplicate the integration tests
jwnimmer-tri Oct 20, 2024
6473c79
rm chaff
jwnimmer-tri Oct 20, 2024
3ae1b4c
rm chaff
jwnimmer-tri Oct 20, 2024
8484456
fix doc typos
jwnimmer-tri Oct 20, 2024
ab0c5ae
fix cc nits
jwnimmer-tri Oct 20, 2024
d6982f4
clean up bindings
jwnimmer-tri Oct 20, 2024
a1998a1
[workspace] Force-disable CoinUtils debugging hooks
jwnimmer-tri Oct 20, 2024
dcad859
Merge branch 'coin-debung' into solve_mathematical_program_in_parallel
jwnimmer-tri Oct 20, 2024
6eac03c
merge upstream
jwnimmer-tri Oct 20, 2024
f18eba5
Merge remote-tracking branch 'drake_master/master' into solve_mathema…
AlexandreAmice Oct 21, 2024
5cdc362
fix small grammar nit
AlexandreAmice Oct 21, 2024
6e432e6
a bit more cleanup
AlexandreAmice Oct 21, 2024
b1b85c7
feature review
AlexandreAmice Oct 24, 2024
b01e902
update tests a bit more
AlexandreAmice Oct 24, 2024
75db004
platform review
AlexandreAmice Oct 24, 2024
0e7f7f5
small nit on comments
AlexandreAmice Oct 24, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
71 changes: 70 additions & 1 deletion bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1682,7 +1682,76 @@ void BindFreeFunctions(py::module m) {
const std::optional<SolverOptions>&>(&solvers::Solve),
py::arg("prog"), py::arg("initial_guess") = py::none(),
py::arg("solver_options") = py::none(), doc.Solve.doc_3args)
.def("GetProgramType", &solvers::GetProgramType, doc.GetProgramType.doc);
.def("GetProgramType", &solvers::GetProgramType, doc.GetProgramType.doc)
.def(
"SolveInParallel",
// The pybind11 infrastructure cannot handle setting a vector to null,
// nor having nulls inside of a vector. We must use a lambda signature
// where pointers are never null by adding `optional<>` decorations.
// Inside of the lambda we'll demote nullopts back to nullptrs. Note
// that SolverOptions is not necessarily cheap to copy, so we still
// carefully accept it by-pointer. The VectorXd is always necessarily
// copied when going from numpy to Eigen so we still pass it by-value.
[](std::vector<const MathematicalProgram*> progs,
std::optional<std::vector<std::optional<Eigen::VectorXd>>>
initial_guesses,
std::optional<std::vector<std::optional<SolverOptions*>>>
solver_options,
std::optional<std::vector<std::optional<SolverId>>> solver_ids,
const Parallelism& parallelism, bool dynamic_schedule) {
std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
if (initial_guesses.has_value()) {
initial_guesses_ptrs.reserve(initial_guesses->size());
for (const auto& guess : *initial_guesses) {
initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
}
}
std::vector<const SolverOptions*> solver_options_ptrs;
if (solver_options.has_value()) {
solver_options_ptrs.reserve(solver_options->size());
for (const auto& option : *solver_options) {
solver_options_ptrs.push_back(option ? *option : nullptr);
}
}
return solvers::SolveInParallel(progs,
initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
solver_options.has_value() ? &solver_options_ptrs : nullptr,
solver_ids.has_value() ? &(*solver_ids) : nullptr, parallelism,
dynamic_schedule);
},
py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
py::arg("solver_options") = std::nullopt,
py::arg("solver_ids") = std::nullopt,
py::arg("parallelism") = Parallelism::Max(),
py::arg("dynamic_schedule") = false,
doc.SolveInParallel
.doc_6args_progs_initial_guesses_solver_options_solver_ids_parallelism_dynamic_schedule)
.def(
"SolveInParallel",
[](std::vector<const MathematicalProgram*> progs,
std::optional<std::vector<std::optional<Eigen::VectorXd>>>
initial_guesses,
const SolverOptions* solver_options,
const std::optional<SolverId>& solver_id,
const Parallelism& parallelism, bool dynamic_schedule) {
std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
if (initial_guesses.has_value()) {
initial_guesses_ptrs.reserve(initial_guesses->size());
for (const auto& guess : *initial_guesses) {
initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
}
}
return solvers::SolveInParallel(progs,
initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
solver_options, solver_id, parallelism, dynamic_schedule);
},
py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
py::arg("solver_options") = std::nullopt,
py::arg("solver_id") = std::nullopt,
py::arg("parallelism") = Parallelism::Max(),
py::arg("dynamic_schedule") = false,
doc.SolveInParallel
.doc_6args_progs_initial_guesses_solver_options_solver_id_parallelism_dynamic_schedule);
}

} // namespace
Expand Down
98 changes: 97 additions & 1 deletion bindings/pydrake/solvers/test/mathematicalprogram_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import scipy.sparse

from pydrake.autodiffutils import AutoDiffXd
from pydrake.common import kDrakeAssertIsArmed
from pydrake.common import kDrakeAssertIsArmed, Parallelism
from pydrake.common.test_utilities import numpy_compare
from pydrake.forwarddiff import jacobian
from pydrake.math import ge
Expand Down Expand Up @@ -1527,6 +1527,102 @@ def test_mathematical_program_result(self):
self.assertEqual(result.get_solution_result(),
mp.SolutionResult.kSolutionResultNotSet)

def test_solve_in_parallel(self):
prog = mp.MathematicalProgram()
x = prog.NewContinuousVariables(2)
prog.AddLinearConstraint(x[0] + x[1] == 2)
prog.AddQuadraticCost(x[0] ** 2, is_convex=True)

num_progs = 4
progs = [prog for _ in range(num_progs)]
initial_guesses = [np.zeros(2) for _ in range(num_progs)]
solver_ids = [ScsSolver().solver_id() for _ in range(num_progs)]
solver_options = [SolverOptions() for _ in range(num_progs)]

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=None,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Finally, interleave None into initial_guesses, solver_options, and
# solver_ids.
initial_guesses[0] = None
solver_options[0] = None
solver_ids[0] = None
results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Now we test the overload
results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=SolverOptions(),
solver_id=ScsSolver().solver_id(),
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=SolverOptions(),
solver_id=ScsSolver().solver_id(),
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Ensure that all options being None does not cause ambiguity.
results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=None,
solver_id=None,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Ensure default arguments do not cause ambiguity.
results = mp.SolveInParallel(progs=progs)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))


class DummySolverInterface(SolverInterface):

Expand Down
28 changes: 28 additions & 0 deletions solvers/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -584,6 +584,7 @@ drake_cc_library(
deps = [
":clarabel_solver",
":clp_solver",
":csdp_solver",
":gurobi_solver",
":ipopt_solver",
":mathematical_program",
Expand Down Expand Up @@ -726,10 +727,13 @@ drake_cc_library(
":mathematical_program",
":mathematical_program_result",
":solver_base",
"//common:parallelism",
],
implementation_deps = [
":choose_best_solver",
":ipopt_solver",
"//common:nice_type_name",
"@common_robotics_utilities",
],
)

Expand Down Expand Up @@ -2007,6 +2011,30 @@ drake_cc_googletest(
],
)

drake_cc_googletest(
    name = "solve_in_parallel_test",
    # Using multiple threads is an essential part of testing SolveInParallel.
    num_threads = 4,
    # Run each test case in a separate process, for improved latency.
    shard_count = 16,
    # Every solver backend exercised by the test must be listed here, along
    # with the example-program libraries the test cases are built from.
    deps = [
        ":choose_best_solver",
        ":clarabel_solver",
        ":clp_solver",
        ":csdp_solver",
        ":gurobi_solver",
        ":ipopt_solver",
        ":linear_program_examples",
        ":mosek_solver",
        ":nlopt_solver",
        ":osqp_solver",
        ":quadratic_program_examples",
        ":scs_solver",
        ":snopt_solver",
        ":solve",
    ],
)

drake_cc_googletest(
name = "aggregate_costs_constraints_test",
deps = [
Expand Down
12 changes: 12 additions & 0 deletions solvers/ipopt_solver.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#pragma once

#include <ostream>
#include <set>
#include <string>

#include <Eigen/Core>
Expand Down Expand Up @@ -40,6 +41,17 @@ struct IpoptSolverDetails {
const char* ConvertStatusToString() const;
};

/**
* A wrapper to call
* <a href="https://coin-or.github.io/Ipopt/">Ipopt</a>
* using Drake's MathematicalProgram.
*
 * The IpoptSolver is NOT thread-safe to call in parallel. This is due to
 * Ipopt's reliance on the MUMPS linear solver, which is not safe to call
 * concurrently
* (see https://github.com/coin-or/Ipopt/issues/733). This can be resolved by
* enabling the SPRAL solver (see Drake issue
* https://github.com/RobotLocomotion/drake/issues/21476).
*/
class IpoptSolver final : public SolverBase {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(IpoptSolver);
Expand Down
Loading