Skip to content

Commit

Permalink
Support self concordant functions (#130)
Browse files Browse the repository at this point in the history
Added options for the user to pick the FW variant. Currently, Blended Pairwise Conditional Gradient, Away Frank-Wolfe, vanilla Frank-Wolfe, and Blended Conditional Gradient are supported. The user is also able to add their own variant of Frank-Wolfe.
  • Loading branch information
dhendryc authored Jun 7, 2023
1 parent d5d29c9 commit ecf8db0
Show file tree
Hide file tree
Showing 12 changed files with 524 additions and 93 deletions.
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Bonobo = "0.1.3"
DataStructures = "0.18"
Distributions = "0.25"
FrankWolfe = "0.2.11"
FrankWolfe = "0.2.24"
HiGHS = "1"
MathOptInterface = "1"
MathOptSetDistances = "0.2"
Expand Down
2 changes: 1 addition & 1 deletion examples/portfolio.jl
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ const Mi = (Ai + Ai') / 2
return storage
end

x, _, result = Boscia.solve(f, grad!, lmo, verbose=true)
x, _, result = Boscia.solve(f, grad!, lmo, verbose=true, time_limit=600)
@test dot(ai, x) <= bi + 1e-6
@test f(x) <= f(result[:raw_solution]) + 1e-6
end
Expand Down
1 change: 1 addition & 0 deletions src/Boscia.jl
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import MathOptSetDistances as MOD

include("time_tracking_lmo.jl")
include("bounds.jl")
include("frank_wolfe_variants.jl")
include("node.jl")
include("custom_bonobo.jl")
include("callbacks.jl")
Expand Down
58 changes: 31 additions & 27 deletions src/callbacks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -26,43 +26,47 @@ function build_FW_callback(tree, min_number_lower, check_rounding_value::Bool, f
@assert is_linear_feasible(tree.root.problem.lmo, state.v)
end
# @assert is_linear_feasible(tree.root.problem.lmo, state.x)

push!(fw_iterations, state.t)
if ncalls != state.lmo.ncalls
ncalls = state.lmo.ncalls
(best_v, best_val) =
find_best_solution(tree.root.problem.f, tree.root.problem.lmo.lmo.o, vars)
if best_val < tree.incumbent
tree.root.updated_incumbent[] = true
node = tree.nodes[tree.root.current_node_id[]]
sol = FrankWolfeSolution(best_val, best_v, node, :SCIP)
push!(tree.solutions, sol)
if tree.incumbent_solution === nothing ||
sol.objective < tree.incumbent_solution.objective
tree.incumbent_solution = sol

if state.lmo !== nothing # can happen with using Blended Conditional Gradient
if ncalls != state.lmo.ncalls
ncalls = state.lmo.ncalls
(best_v, best_val) =
find_best_solution(tree.root.problem.f, tree.root.problem.lmo.lmo.o, vars, tree.root.options[:domain_oracle])
if best_val < tree.incumbent
tree.root.updated_incumbent[] = true
node = tree.nodes[tree.root.current_node_id[]]
sol = FrankWolfeSolution(best_val, best_v, node, :SCIP)
push!(tree.solutions, sol)
if tree.incumbent_solution === nothing ||
sol.objective < tree.incumbent_solution.objective
tree.incumbent_solution = sol
end
tree.incumbent = best_val
Bonobo.bound!(tree, node.id)
end
tree.incumbent = best_val
Bonobo.bound!(tree, node.id)
end
end

if (state.primal - state.dual_gap > tree.incumbent + 1e-2) && tree.num_nodes != 1 && state.t > min_fw_iterations
return false
end

val = tree.root.problem.f(state.v)
if val < tree.incumbent
tree.root.updated_incumbent[] = true
#TODO: update solution without adding node
node = tree.nodes[tree.root.current_node_id[]]
sol = FrankWolfeSolution(val, copy(state.v), node, :vertex)
push!(tree.solutions, sol)
if tree.incumbent_solution === nothing ||
sol.objective < tree.incumbent_solution.objective
tree.incumbent_solution = sol
if tree.root.options[:domain_oracle](state.v)
val = tree.root.problem.f(state.v)
if val < tree.incumbent
tree.root.updated_incumbent[] = true
#TODO: update solution without adding node
node = tree.nodes[tree.root.current_node_id[]]
sol = FrankWolfeSolution(val, copy(state.v), node, :vertex)
push!(tree.solutions, sol)
if tree.incumbent_solution === nothing ||
sol.objective < tree.incumbent_solution.objective
tree.incumbent_solution = sol
end
tree.incumbent = val
Bonobo.bound!(tree, node.id)
end
tree.incumbent = val
Bonobo.bound!(tree, node.id)
end

node = tree.nodes[tree.root.current_node_id[]]
Expand Down
7 changes: 7 additions & 0 deletions src/custom_bonobo.jl
Original file line number Diff line number Diff line change
Expand Up @@ -106,5 +106,12 @@ function Bonobo.get_solution(
tree::Bonobo.BnBTree{N,R,V,S};
result=1,
) where {N,R,V,S<:FrankWolfeSolution{N,V}}
if isempty(tree.solutions)
@warn "There is no solution in the tree. This behaviour can happen if you have supplied
\na custom domain oracle. In that case, try to increase the time limit. If you have not specified a
\ndomain oracle, please report!"
@assert tree.root.problem.solving_stage == TIME_LIMIT_REACHED
return nothing
end
return tree.solutions[result].solution
end
217 changes: 217 additions & 0 deletions src/frank_wolfe_variants.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,217 @@

"""
Frank-Wolfe variant used to solve the subproblems at the node level.

A `FrankWolfeVariant` must implement

```
solve_frank_wolfe(fw::FrankWolfeVariant, f, grad!, lmo, active_set; line_search, epsilon, max_iteration,
    add_dropped_vertices, use_extra_vertex_storage, extra_vertex_storage, callback, lazy, timeout,
    verbose, workspace)
```

returning the tuple `(x, primal, dual_gap, active_set)`.
It may also implement `build_frank_wolfe_workspace(x)` which creates a
workspace structure that is passed as last argument to `solve_frank_wolfe`.
"""
abstract type FrankWolfeVariant end

# Fallback printing for any FrankWolfeVariant: show the bare type name,
# dropping any module qualification (e.g. "Boscia.BPCG" -> "BPCG").
function Base.print(io::IO, variant::FrankWolfeVariant)
    name_parts = split(string(typeof(variant)), ".")
    return print(io, last(name_parts))
end

"""
    solve_frank_wolfe(fw::FrankWolfeVariant, f, grad!, lmo, active_set; line_search, epsilon, max_iteration,
        add_dropped_vertices, use_extra_vertex_storage, extra_vertex_storage, callback, lazy, timeout,
        verbose, workspace)

Solve the node problem with the given Frank-Wolfe variant `fw`.
Returns the tuple `(x, primal, dual_gap, active_set)`: the solution `x` to the
node problem, its primal value and dual gap, and the resulting active set.
"""
function solve_frank_wolfe end

# By default a variant needs no workspace; concrete variants may add a method
# returning a structure that is passed as `workspace` to `solve_frank_wolfe`.
function build_frank_wolfe_workspace(::FrankWolfeVariant, x)
    return nothing
end


"""
Away-Frank-Wolfe

In every iteration, the worst-performing vertex in the active set with respect
to the gradient (the away vertex) is computed. If enough local progress can be
made, weight is shifted from the away vertex to all other vertices.
If lazification is activated, the FW vertex is only computed when not enough
local progress can be guaranteed.
"""
struct AwayFrankWolfe <: FrankWolfeVariant end

function solve_frank_wolfe(
    frank_wolfe_variant::AwayFrankWolfe,
    f,
    grad!,
    lmo,
    active_set;
    line_search::FrankWolfe.LineSearchMethod=FrankWolfe.Adaptive(),
    epsilon=1e-7,
    max_iteration=10000,
    add_dropped_vertices=false,
    use_extra_vertex_storage=false,
    extra_vertex_storage=nothing,
    callback=nothing,
    lazy=false,
    timeout=Inf,
    verbose=false,
    workspace=nothing,
)
    # Delegate to the AFW implementation in FrankWolfe.jl and keep only the
    # entries of the returned tuple that Boscia consumes.
    result = FrankWolfe.away_frank_wolfe(
        f,
        grad!,
        lmo,
        active_set;
        line_search=line_search,
        epsilon=epsilon,
        max_iteration=max_iteration,
        lazy=lazy,
        callback=callback,
        timeout=timeout,
        add_dropped_vertices=add_dropped_vertices,
        use_extra_vertex_storage=use_extra_vertex_storage,
        extra_vertex_storage=extra_vertex_storage,
        verbose=verbose,
    )
    x, _, primal, dual_gap, _, updated_active_set = result

    return x, primal, dual_gap, updated_active_set
end

# Display name for this variant.
function Base.print(io::IO, ::AwayFrankWolfe)
    return print(io, "Away-Frank-Wolfe")
end


"""
Blended Conditional Gradient

Wraps the Blended Conditional Gradient (BCG) algorithm as implemented in
FrankWolfe.jl (`blended_conditional_gradient`).
"""
struct Blended <: FrankWolfeVariant end

function solve_frank_wolfe(
    frank_wolfe_variant::Blended,
    f,
    grad!,
    lmo,
    active_set;
    line_search::FrankWolfe.LineSearchMethod=FrankWolfe.Adaptive(),
    epsilon=1e-7,
    max_iteration=10000,
    add_dropped_vertices=false,
    use_extra_vertex_storage=false,
    extra_vertex_storage=nothing,
    callback=nothing,
    lazy=false,
    timeout=Inf,
    verbose=false,
    workspace=nothing,
)
    # Qualify the call with the FrankWolfe module, consistent with all other
    # variants in this file; the bare name may not be in scope depending on
    # how FrankWolfe is imported.
    # NOTE: `lazy` is accepted for interface uniformity but is not forwarded,
    # matching the original behavior of this wrapper.
    x, _, primal, dual_gap, _, active_set = FrankWolfe.blended_conditional_gradient(
        f,
        grad!,
        lmo,
        active_set,
        line_search=line_search,
        epsilon=epsilon,
        max_iteration=max_iteration,
        add_dropped_vertices=add_dropped_vertices,
        use_extra_vertex_storage=use_extra_vertex_storage,
        extra_vertex_storage=extra_vertex_storage,
        callback=callback,
        timeout=timeout,
        verbose=verbose,
    )

    return x, primal, dual_gap, active_set
end

# Display name for this variant.
function Base.print(io::IO, ::Blended)
    return print(io, "Blended Conditional Gradient")
end

"""
Blended Pairwise Conditional Gradient

Wraps the Blended Pairwise Conditional Gradient (BPCG) algorithm as
implemented in FrankWolfe.jl (`blended_pairwise_conditional_gradient`).
"""
struct BPCG <: FrankWolfeVariant end

function solve_frank_wolfe(
    frank_wolfe_variant::BPCG,
    f,
    grad!,
    lmo,
    active_set;
    line_search::FrankWolfe.LineSearchMethod=FrankWolfe.Adaptive(),
    epsilon=1e-7,
    max_iteration=10000,
    add_dropped_vertices=false,
    use_extra_vertex_storage=false,
    extra_vertex_storage=nothing,
    callback=nothing,
    lazy=false,
    timeout=Inf,
    verbose=false,
    workspace=nothing,
)
    # Delegate to the BPCG implementation in FrankWolfe.jl and keep only the
    # entries of the returned tuple that Boscia consumes.
    result = FrankWolfe.blended_pairwise_conditional_gradient(
        f,
        grad!,
        lmo,
        active_set;
        line_search=line_search,
        epsilon=epsilon,
        max_iteration=max_iteration,
        lazy=lazy,
        callback=callback,
        timeout=timeout,
        add_dropped_vertices=add_dropped_vertices,
        use_extra_vertex_storage=use_extra_vertex_storage,
        extra_vertex_storage=extra_vertex_storage,
        verbose=verbose,
    )
    x, _, primal, dual_gap, _, updated_active_set = result

    return x, primal, dual_gap, updated_active_set
end

# Display name for this variant.
function Base.print(io::IO, ::BPCG)
    return print(io, "Blended Pairwise Conditional Gradient")
end

"""
Vanilla-Frank-Wolfe

The standard variant of Frank-Wolfe. In each iteration, the vertex `v`
minimizing `⟨∇f(x), v⟩` (equivalently, maximizing `⟨∇f(x), x - v⟩`) is
computed. Lazification cannot be used in this setting.
"""
struct VanillaFrankWolfe <: FrankWolfeVariant end

function solve_frank_wolfe(
    frank_wolfe_variant::VanillaFrankWolfe,
    f,
    grad!,
    lmo,
    active_set;
    line_search::FrankWolfe.LineSearchMethod=FrankWolfe.Adaptive(),
    epsilon=1e-7,
    max_iteration=10000,
    add_dropped_vertices=false,
    use_extra_vertex_storage=false,
    extra_vertex_storage=nothing,
    callback=nothing,
    lazy=false,
    timeout=Inf,
    verbose=false,
    workspace=nothing,
)
    # With `away_steps=false`, `away_frank_wolfe` performs vanilla Frank-Wolfe.
    # The `lazy` flag is only honored when `away_steps=true`, so it can be
    # neglected here.
    result = FrankWolfe.away_frank_wolfe(
        f,
        grad!,
        lmo,
        active_set;
        away_steps=false,
        line_search=line_search,
        epsilon=epsilon,
        max_iteration=max_iteration,
        callback=callback,
        timeout=timeout,
        add_dropped_vertices=add_dropped_vertices,
        use_extra_vertex_storage=use_extra_vertex_storage,
        extra_vertex_storage=extra_vertex_storage,
        verbose=verbose,
    )
    x, _, primal, dual_gap, _, updated_active_set = result

    return x, primal, dual_gap, updated_active_set
end

# Display name for this variant.
function Base.print(io::IO, ::VanillaFrankWolfe)
    return print(io, "Vanilla-Frank-Wolfe")
end
26 changes: 15 additions & 11 deletions src/heuristics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,34 +3,38 @@
Finds the best solution in the SCIP solution storage, based on the objective function `f`.
Returns the solution vector and the corresponding best value.
"""
function find_best_solution(f::Function, o::SCIP.Optimizer, vars::Vector{MOI.VariableIndex})
function find_best_solution(f::Function, o::SCIP.Optimizer, vars::Vector{MOI.VariableIndex}, domain_oracle)
sols_vec =
unsafe_wrap(Vector{Ptr{Cvoid}}, SCIP.LibSCIP.SCIPgetSols(o), SCIP.LibSCIP.SCIPgetNSols(o))
best_val = Inf
best_v = nothing
for sol in sols_vec
v = SCIP.sol_values(o, vars, sol)
val = f(v)
if val < best_val
best_val = val
best_v = v
if domain_oracle(v)
val = f(v)
if val < best_val
best_val = val
best_v = v
end
end
end
@assert isfinite(best_val)
#@assert isfinite(best_val) -> not necessarily the case if the domain oracle is not the default.
return (best_v, best_val)
end

function find_best_solution(f::Function, o::MOI.AbstractOptimizer, vars::Vector{MOI.VariableIndex})
function find_best_solution(f::Function, o::MOI.AbstractOptimizer, vars::Vector{MOI.VariableIndex}, domain_oracle)
nsols = MOI.get(o, MOI.ResultCount())
@assert nsols > 0
best_val = Inf
best_v = nothing
for sol_idx in 1:nsols
xv = [MOI.get(o, MOI.VariablePrimal(sol_idx), xi) for xi in vars]
val = f(xv)
if val < best_val
best_val = val
best_v = xv
if domain_oracle(xv)
val = f(xv)
if val < best_val
best_val = val
best_v = xv
end
end
end
return (best_v, best_val)
Expand Down
Loading

0 comments on commit ecf8db0

Please sign in to comment.