Solve mathematical programs in parallel (#21957)
Adds convenience methods for solving batches of MathematicalProgram in parallel.

Co-authored-by: Jeremy Nimmer <[email protected]>
AlexandreAmice and jwnimmer-tri authored Oct 24, 2024
1 parent 40adf9c commit ec1896d
Showing 8 changed files with 804 additions and 2 deletions.
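
For orientation, here is a minimal sketch of the Python API this commit adds, distilled from the bindings and tests below. The import spelling of SolveInParallel from pydrake.solvers is an assumption based on the binding definitions; the argument names come from the py::arg declarations.

import numpy as np

from pydrake.common import Parallelism
from pydrake.solvers import (
    MathematicalProgram,
    ScsSolver,
    SolveInParallel,
)

# Build a small convex QP: minimize x0^2 subject to x0 + x1 == 2.
prog = MathematicalProgram()
x = prog.NewContinuousVariables(2)
prog.AddLinearConstraint(x[0] + x[1] == 2)
prog.AddQuadraticCost(x[0] ** 2, is_convex=True)

# Solve four copies concurrently. Every argument other than progs is
# optional; per-program list entries may be None to use defaults.
progs = [prog] * 4
results = SolveInParallel(
    progs=progs,
    initial_guesses=[np.zeros(2)] * 4,
    solver_ids=[ScsSolver().solver_id()] * 4,
    parallelism=Parallelism.Max(),
)
assert all(r.is_success() for r in results)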
73 changes: 72 additions & 1 deletion bindings/pydrake/solvers/solvers_py_mathematicalprogram.cc
@@ -1682,7 +1682,78 @@ void BindFreeFunctions(py::module m) {
const std::optional<SolverOptions>&>(&solvers::Solve),
py::arg("prog"), py::arg("initial_guess") = py::none(),
py::arg("solver_options") = py::none(), doc.Solve.doc_3args)
.def("GetProgramType", &solvers::GetProgramType, doc.GetProgramType.doc);
.def("GetProgramType", &solvers::GetProgramType, doc.GetProgramType.doc)
.def(
"SolveInParallel",
// The pybind11 infrastructure cannot handle setting a vector to null,
// nor having nulls inside of a vector. We must use a lambda signature
// where pointers are never null by adding `optional<>` decorations.
// Inside of the lambda we'll demote nullopts back to nullptrs. Note
// that SolverOptions is not necessarily cheap to copy, so we still
// carefully accept it by-pointer. The VectorXd is always necessarily
// copied when going from numpy to Eigen, so we still pass it by-value.
[](std::vector<const MathematicalProgram*> progs,
std::optional<std::vector<std::optional<Eigen::VectorXd>>>
initial_guesses,
std::optional<std::vector<std::optional<SolverOptions*>>>
solver_options,
std::optional<std::vector<std::optional<SolverId>>> solver_ids,
const Parallelism& parallelism, bool dynamic_schedule) {
std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
if (initial_guesses.has_value()) {
initial_guesses_ptrs.reserve(initial_guesses->size());
for (const auto& guess : *initial_guesses) {
initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
}
}
std::vector<const SolverOptions*> solver_options_ptrs;
if (solver_options.has_value()) {
solver_options_ptrs.reserve(solver_options->size());
for (const auto& option : *solver_options) {
solver_options_ptrs.push_back(option ? *option : nullptr);
}
}
return solvers::SolveInParallel(progs,
initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
solver_options.has_value() ? &solver_options_ptrs : nullptr,
solver_ids.has_value() ? &(*solver_ids) : nullptr, parallelism,
dynamic_schedule);
},
py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
py::arg("solver_options") = std::nullopt,
py::arg("solver_ids") = std::nullopt,
py::arg("parallelism") = Parallelism::Max(),
py::arg("dynamic_schedule") = false,
py::call_guard<py::gil_scoped_release>(),
doc.SolveInParallel
.doc_6args_progs_initial_guesses_solver_options_solver_ids_parallelism_dynamic_schedule)
.def(
"SolveInParallel",
[](std::vector<const MathematicalProgram*> progs,
std::optional<std::vector<std::optional<Eigen::VectorXd>>>
initial_guesses,
const SolverOptions* solver_options,
const std::optional<SolverId>& solver_id,
const Parallelism& parallelism, bool dynamic_schedule) {
std::vector<const Eigen::VectorXd*> initial_guesses_ptrs;
if (initial_guesses.has_value()) {
initial_guesses_ptrs.reserve(initial_guesses->size());
for (const auto& guess : *initial_guesses) {
initial_guesses_ptrs.push_back(guess ? &(*guess) : nullptr);
}
}
return solvers::SolveInParallel(progs,
initial_guesses.has_value() ? &initial_guesses_ptrs : nullptr,
solver_options, solver_id, parallelism, dynamic_schedule);
},
py::arg("progs"), py::arg("initial_guesses") = std::nullopt,
py::arg("solver_options") = std::nullopt,
py::arg("solver_id") = std::nullopt,
py::arg("parallelism") = Parallelism::Max(),
py::arg("dynamic_schedule") = false,
py::call_guard<py::gil_scoped_release>(),
doc.SolveInParallel
.doc_6args_progs_initial_guesses_solver_options_solver_id_parallelism_dynamic_schedule);
}

} // namespace
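
From Python, the two .def overloads above differ in how solver configuration is supplied: the first takes per-program lists (solver_options / solver_ids, where individual entries may be None), while the second broadcasts a single SolverOptions / SolverId to every program. A hedged sketch of the distinction, reusing the tiny QP from above and the argument names from the py::arg declarations:

from pydrake.solvers import (
    MathematicalProgram, ScsSolver, SolveInParallel, SolverOptions)

prog = MathematicalProgram()
x = prog.NewContinuousVariables(2)
prog.AddLinearConstraint(x[0] + x[1] == 2)
prog.AddQuadraticCost(x[0] ** 2, is_convex=True)
progs = [prog] * 4

# Overload 1: one entry per program; None entries fall back to defaults
# (the nullopt-to-nullptr demotion handled by the lambda above).
results = SolveInParallel(
    progs=progs,
    solver_options=[SolverOptions(), None, SolverOptions(), None],
    solver_ids=[ScsSolver().solver_id(), None, None, None],
)

# Overload 2: one SolverOptions and one SolverId shared by all programs.
results = SolveInParallel(
    progs=progs,
    solver_options=SolverOptions(),
    solver_id=ScsSolver().solver_id(),
)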
98 changes: 97 additions & 1 deletion bindings/pydrake/solvers/test/mathematicalprogram_test.py
@@ -7,7 +7,7 @@
import scipy.sparse

from pydrake.autodiffutils import AutoDiffXd
from pydrake.common import kDrakeAssertIsArmed
from pydrake.common import kDrakeAssertIsArmed, Parallelism
from pydrake.common.test_utilities import numpy_compare
from pydrake.forwarddiff import jacobian
from pydrake.math import ge
@@ -1527,6 +1527,102 @@ def test_mathematical_program_result(self):
self.assertEqual(result.get_solution_result(),
mp.SolutionResult.kSolutionResultNotSet)

def test_solve_in_parallel(self):
prog = mp.MathematicalProgram()
x = prog.NewContinuousVariables(2)
prog.AddLinearConstraint(x[0] + x[1] == 2)
prog.AddQuadraticCost(x[0] ** 2, is_convex=True)

num_progs = 4
progs = [prog for _ in range(num_progs)]
initial_guesses = [np.zeros(2) for _ in range(num_progs)]
solver_ids = [ScsSolver().solver_id() for _ in range(num_progs)]
solver_options = [SolverOptions() for _ in range(num_progs)]

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=None,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=None,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Finally, interleave None into initial_guesses, solver_options, and
# solver_ids.
initial_guesses[0] = None
solver_options[0] = None
solver_ids[0] = None
results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=solver_options,
solver_ids=solver_ids,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Now test the overload that takes a single SolverOptions and SolverId
# shared by all programs.
results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=SolverOptions(),
solver_id=ScsSolver().solver_id(),
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

results = mp.SolveInParallel(progs=progs,
initial_guesses=initial_guesses,
solver_options=SolverOptions(),
solver_id=ScsSolver().solver_id(),
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Ensure that all options being None does not cause ambiguity.
results = mp.SolveInParallel(progs=progs,
initial_guesses=None,
solver_options=None,
solver_id=None,
parallelism=Parallelism.Max(),
dynamic_schedule=False)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))

# Ensure default arguments do not cause ambiguity.
results = mp.SolveInParallel(progs=progs)
self.assertEqual(len(results), len(progs))
self.assertTrue(all([r.is_success() for r in results]))


class DummySolverInterface(SolverInterface):

28 changes: 28 additions & 0 deletions solvers/BUILD.bazel
@@ -598,6 +598,7 @@ drake_cc_library(
deps = [
":clarabel_solver",
":clp_solver",
":csdp_solver",
":gurobi_solver",
":ipopt_solver",
":mathematical_program",
@@ -741,10 +742,13 @@ drake_cc_library(
":mathematical_program",
":mathematical_program_result",
":solver_base",
"//common:parallelism",
],
implementation_deps = [
":choose_best_solver",
":ipopt_solver",
"//common:nice_type_name",
"@common_robotics_utilities",
],
)

@@ -2024,6 +2028,30 @@ drake_cc_googletest(
],
)

drake_cc_googletest(
name = "solve_in_parallel_test",
# Using multiple threads is an essential part of testing SolveInParallel.
num_threads = 4,
# Run each test case in a separate process, for improved latency.
shard_count = 16,
deps = [
":choose_best_solver",
":clarabel_solver",
":clp_solver",
":csdp_solver",
":gurobi_solver",
":ipopt_solver",
":linear_program_examples",
":mosek_solver",
":nlopt_solver",
":osqp_solver",
":quadratic_program_examples",
":scs_solver",
":snopt_solver",
":solve",
],
)

drake_cc_googletest(
name = "aggregate_costs_constraints_test",
deps = [
11 changes: 11 additions & 0 deletions solvers/ipopt_solver.h
@@ -40,6 +40,17 @@ struct IpoptSolverDetails {
const char* ConvertStatusToString() const;
};

/**
* A wrapper to call
* <a href="https://coin-or.github.io/Ipopt/">Ipopt</a>
* using Drake's MathematicalProgram.
*
 * The IpoptSolver is NOT threadsafe to call in parallel. This is due to
 * Ipopt's reliance on the MUMPS linear solver, which is not safe to call
 * concurrently (see https://github.com/coin-or/Ipopt/issues/733). This can be
 * resolved by enabling the SPRAL solver (see Drake issue
 * https://github.com/RobotLocomotion/drake/issues/21476).
*/
class IpoptSolver final : public SolverBase {
public:
DRAKE_NO_COPY_NO_MOVE_NO_ASSIGN(IpoptSolver);
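
Given this caveat, a caller batching programs that might otherwise be dispatched to Ipopt can pin a different solver explicitly via solver_id. A hedged sketch follows; whether SolveInParallel itself serializes non-threadsafe solvers is an implementation detail not promised by this header, so pinning is the conservative choice. (ScsSolver is what the new unit tests exercise under Parallelism.Max().)

from pydrake.common import Parallelism
from pydrake.solvers import MathematicalProgram, ScsSolver, SolveInParallel

prog = MathematicalProgram()
x = prog.NewContinuousVariables(2)
prog.AddLinearConstraint(x[0] + x[1] == 2)
prog.AddQuadraticCost(x[0] ** 2, is_convex=True)
progs = [prog] * 8

# Pin every solve to SCS instead of relying on automatic solver selection,
# which could in principle choose a solver that is unsafe to run in parallel.
results = SolveInParallel(
    progs=progs,
    solver_id=ScsSolver().solver_id(),
    parallelism=Parallelism.Max(),
)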