diff --git a/Project.toml b/Project.toml
index 08a0793..a09b3a8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -6,6 +6,7 @@ version = "0.0.1"
 [deps]
 Catlab = "134e5e36-593f-5add-ad60-77f754baafbe"
+ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56"
@@ -13,6 +14,7 @@ Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
@@ -21,7 +23,7 @@ Catlab = "0.16.10"
 ForwardDiff = "0.10.36"
 NLsolve = "4.5.1"
 Optim = "1.9.4"
+Reexport = "1.2.2"
 SparseArrays = "1.10.0"
 StatsBase = "0.34.3"
 julia = "1.10"
-Reexport = "1.2.2"
diff --git a/docs/literate/literate_example.jl b/docs/literate/literate_example.jl
index 941b4c6..a3654f5 100644
--- a/docs/literate/literate_example.jl
+++ b/docs/literate/literate_example.jl
@@ -10,4 +10,4 @@ using AlgebraicOptimization
 
 # # We provide the `hello(string)` method which prints "Hello, `string`!"
 
-#hello("World")
+# hello("World")
diff --git a/src/FinSetAlgebras.jl b/src/FinSetAlgebras.jl
index bf87137..20d3651 100644
--- a/src/FinSetAlgebras.jl
+++ b/src/FinSetAlgebras.jl
@@ -2,7 +2,7 @@
 # TODO: upstream into Catlab.jl
 module FinSetAlgebras
 
-export FinSetAlgebra, CospanAlgebra, Open, hom_map, laxator, data, portmap
+export FinSetAlgebra, CospanAlgebra, Open, hom_map, laxator, data, portmap, draw, draw_types
 
 using LinearAlgebra, SparseArrays
 using Catlab
@@ -82,9 +82,13 @@ end
 data(obj::Open{T}) where T = obj.o
 portmap(obj::Open{T}) where T = obj.m
 
-# Helper function for when m is identity.
+# Helper functions for when m is identity.
 function Open{T}(o::T) where T
-    Open{T}(domain(o), o, id(domain(o)))
+    Open{T}(dom(o), o, id(dom(o)))
+end
+
+function Open{T}(S::FinSet, o::T) where T
+    Open{T}(S, o, id(dom(o)))
 end
 
 function Open{T}(o::T, m::FinFunction) where T
@@ -142,4 +146,14 @@ function oapply(CA::CospanAlgebra{Open{T}}, FA::FinSetAlgebra{T}, d::AbstractUWD
     return oapply(CA, FA, uwd_to_cospan(d), Xs)
 end
 
-end
\ No newline at end of file
+
+function draw(uwd)
+    to_graphviz(uwd, box_labels=:name, junction_labels=:variable, edge_attrs=Dict(:len => ".75"))
+end
+
+function draw_types(uwd) # TODO: check that `uwd` is typed and raise a clear error when it is not
+    to_graphviz(uwd, box_labels=:name, junction_labels=:junction_type, edge_attrs=Dict(:len => ".75"))
+end
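+
+# Illustrative usage (added sketch; assumes Catlab's Graphviz support is available):
+# for a relation such as
+#   d = @relation (x,y) begin f(x,w); g(w,y) end
+# `draw(d)` labels boxes by name and junctions by variable, while `draw_types(d)`
+# labels junctions by their junction type and therefore expects a typed UWD.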
""" function laxator(::MinObj, Xs::Vector{PrimalObjective}) c = coproduct([dom(X) for X in Xs]) - subproblems = [x -> X(pullback_matrix(l)*x) for (X,l) in zip(Xs, legs(c))] + subproblems = [x -> X(pullback_function(l, x)) for (X,l) in zip(Xs, legs(c))] objective(x) = sum([sp(x) for sp in subproblems]) return PrimalObjective(apex(c), objective) end @@ -65,7 +67,20 @@ end Returns the gradient flow optimizer of a given primal objective. """ function gradient_flow(f::Open{PrimalObjective}) - return Open{Optimizer}(f.S, x -> -ForwardDiff.gradient(f.o, x), f.m) + function f_wrapper(ca::ComponentArray) + inputs = [ca[key] for key in keys(ca)] + f.o(inputs) + end + + function gradient_descent(x) + init_conds = ComponentVector(;zip([Symbol(i) for i in 1:length(x)], x)...) + grad = -ForwardDiff.gradient(f_wrapper, init_conds) + [grad[key] for key in keys(grad)] + end + + return Open{Optimizer}(f.S, x -> gradient_descent(x), f.m) + + # return Open{Optimizer}(f.S, x -> -ForwardDiff.gradient(f.o, x), f.m) # Scalar version end function solve(f::Open{PrimalObjective}, x0::Vector{Float64}, ss::Float64, n_steps::Int) @@ -101,14 +116,14 @@ struct DualComp <: FinSetAlgebra{SaddleObjective} end # Only "glue" along dual variables hom_map(::DualComp, ϕ::FinFunction, p::SaddleObjective) = SaddleObjective(p.primal_space, codom(ϕ), - (x,λ) -> p(x, pullback_matrix(ϕ)*λ)) + (x,λ) -> p(x, pullback_function(ϕ, λ))) # Laxate along both primal and dual variables function laxator(::DualComp, Xs::Vector{SaddleObjective}) c1 = coproduct([X.primal_space for X in Xs]) c2 = coproduct([X.dual_space for X in Xs]) subproblems = [(x,λ) -> - X(pullback_matrix(l1)*x, pullback_matrix(l2)*λ) for (X,l1,l2) in zip(Xs, legs(c1), legs(c2))] + X(pullback_function(l1, x), pullback_function(l2, λ)) for (X,l1,l2) in zip(Xs, legs(c1), legs(c2))] objective(x,λ) = sum([sp(x,λ) for sp in subproblems]) return SaddleObjective(apex(c1), apex(c2), objective) end @@ -128,4 +143,4 @@ function gradient_flow(of::Open{SaddleObjective}) λ -> ForwardDiff.gradient(dual_objective(f, x(λ)), λ), of.m) end -end \ No newline at end of file +end # module \ No newline at end of file diff --git a/src/OpenFlowGraphs.jl b/src/OpenFlowGraphs.jl index d672139..8d12aa4 100644 --- a/src/OpenFlowGraphs.jl +++ b/src/OpenFlowGraphs.jl @@ -49,7 +49,7 @@ struct FG <: FinSetAlgebra{FlowGraph} end hom_map(::FG, ϕ::FinFunction, g::FlowGraph) = FlowGraph(codom(ϕ), g.edges, g.src⋅ϕ, g.tgt⋅ϕ, - g.edge_costs, pushforward_matrix(ϕ)*g.flows) + g.edge_costs, pushforward_function(ϕ, g.flows)) function laxator(::FG, gs::Vector{FlowGraph}) laxed_src = reduce(⊕, [g.src for g in gs]) diff --git a/src/Optimizers.jl b/src/Optimizers.jl index bca291f..6fc7642 100644 --- a/src/Optimizers.jl +++ b/src/Optimizers.jl @@ -1,30 +1,15 @@ # Implement the cospan-algebra of dynamical systems. module Optimizers -export pullback_matrix, pushforward_matrix, Optimizer, OpenContinuousOpt, OpenDiscreteOpt, Euler, - simulate +export Optimizer, OpenContinuousOpt, OpenDiscreteOpt, Euler, + simulate, pullback_function, pushforward_function using ..FinSetAlgebras import ..FinSetAlgebras: hom_map, laxator using Catlab import Catlab: oapply, dom -using SparseArrays +using ComponentArrays -""" pullback_matrix(f::FinFunction) - -The pullback of f : n → m is the linear map f^* : Rᵐ → Rⁿ defined by -f^*(y)[i] = y[f(i)]. 
-""" -function pullback_matrix(f::FinFunction) - n = length(dom(f)) - sparse(1:n, f.(dom(f)), ones(Int,n), dom(f).n, codom(f).n) -end - -""" pushforward_matrix(f::FinFunction) - -The pushforward is the dual of the pullback. -""" -pushforward_matrix(f::FinFunction) = pullback_matrix(f)' """ Optimizer @@ -46,17 +31,18 @@ struct DiscreteOpt <: FinSetAlgebra{Optimizer} end The hom map is defined as ϕ ↦ (s ↦ ϕ_*∘s∘ϕ^*). """ -hom_map(::ContinuousOpt, ϕ::FinFunction, s::Optimizer) = - Optimizer(codom(ϕ), x->pushforward_matrix(ϕ)*s(pullback_matrix(ϕ)*x)) +function hom_map(::ContinuousOpt, ϕ::FinFunction, s::Optimizer) + Optimizer(codom(ϕ), x -> pushforward_function(ϕ, s(pullback_function(ϕ, x)))) +end """ hom_map(::DiscreteOpt, ϕ::FinFunction, s::Optimizer) The hom map is defined as ϕ ↦ (s ↦ id + ϕ_*∘(s - id)∘ϕ^*). """ hom_map(::DiscreteOpt, ϕ::FinFunction, s::Optimizer) = - Optimizer(codom(ϕ), x-> begin - y = pullback_matrix(ϕ)*x - return x + pushforward_matrix(ϕ)*(s(y) - y) + Optimizer(codom(ϕ), x -> begin + y = pullback_function(ϕ, x) + return x + pushforward_function(ϕ, (s(y) - y)) end) """ laxator(::ContinuousOpt, Xs::Vector{Optimizer}) @@ -65,10 +51,10 @@ Takes the "disjoint union" of a collection of optimizers. """ function laxator(::ContinuousOpt, Xs::Vector{Optimizer}) c = coproduct([dom(X) for X in Xs]) - subsystems = [x -> X(pullback_matrix(l)*x) for (X,l) in zip(Xs, legs(c))] + subsystems = [x -> X(pullback_function(l, x)) for (X, l) in zip(Xs, legs(c))] function parallel_dynamics(x) res = Vector{Vector}(undef, length(Xs)) # Initialize storage for results - #=Threads.@threads=# for i = 1:length(Xs) + for i = 1:length(Xs) #=Threads.@threads=# res[i] = subsystems[i](x) end return vcat(res...) @@ -78,7 +64,14 @@ end # Same as continuous opt laxator(::DiscreteOpt, Xs::Vector{Optimizer}) = laxator(ContinuousOpt(), Xs) + Open{Optimizer}(S::FinSet, v::Function, m::FinFunction) = Open{Optimizer}(S, Optimizer(S, v), m) +Open{Optimizer}(s::Int, v::Function, m::FinFunction) = Open{Optimizer}(FinSet(s), v, m) + +# Special cases: m is an identity +Open{Optimizer}(S::FinSet, v::Function) = Open{Optimizer}(S, Optimizer(S, v), id(S)) +Open{Optimizer}(s::Int, v::Function) = Open{Optimizer}(FinSet(s), v) + # Turn into cospan-algebras. struct OpenContinuousOpt <: CospanAlgebra{Open{Optimizer}} end @@ -94,11 +87,12 @@ end # Euler's method is a natural transformation from continous optimizers to discrete optimizers. function Euler(f::Open{Optimizer}, γ::Float64) - return Open{Optimizer}(f.S, Optimizer(f.S, x->x+γ*f.o(x)), f.m) + return Open{Optimizer}(f.S, + Optimizer(f.S, x -> x .+ γ .* f.o(x)), f.m) end # Run a discrete optimizer the designated number of time-steps. -function simulate(f::Open{Optimizer}, x0::Vector{Float64}, tsteps::Int) +function simulate(f::Open{Optimizer}, x0::Vector, tsteps::Int) res = x0 for i in 1:tsteps res = f.o(res) @@ -106,4 +100,85 @@ function simulate(f::Open{Optimizer}, x0::Vector{Float64}, tsteps::Int) return res end -end \ No newline at end of file +# Run a discrete optimizer the designated number of time-steps. 
 
 # Run a discrete optimizer the designated number of time-steps.
-function simulate(f::Open{Optimizer}, x0::Vector{Float64}, tsteps::Int)
+function simulate(f::Open{Optimizer}, x0::Vector, tsteps::Int)
     res = x0
     for i in 1:tsteps
         res = f.o(res)
@@ -106,4 +100,85 @@ end
     return res
 end
 
-end
\ No newline at end of file
+# Run a discrete optimizer for the designated number of time-steps, with initial
+# conditions given as a ComponentArray keyed by the variable names of the UWD `d`.
+function simulate(f::Open{Optimizer}, d::AbstractUWD, x0::ComponentArray, tsteps::Int)
+    # Format initial conditions: assign each UWD variable an index in the flat state vector.
+    initial_cond_vec = zeros(length(d[:variable]))
+    var_to_index = Dict{Symbol,Int}()
+    curr_index = 1
+    for junction in d[:junction]
+        if !haskey(var_to_index, d[:variable][junction])
+            var_to_index[d[:variable][junction]] = curr_index
+            curr_index += 1
+        end
+    end
+
+    for (var, index) in var_to_index
+        initial_cond_vec[index] = x0[var]
+    end
+    res = initial_cond_vec
+    # Simulate
+    for i in 1:tsteps
+        res = f.o(res)
+    end
+
+    # Rebuild a component array keyed by the UWD's variable names
+    return ComponentArray(; [var => res[index] for (var, index) in var_to_index]...)
+end
+
+function (f::Open{Optimizer})(x0::Vector)
+    return f.o(x0)
+end
+
+
+""" pullback_function(f::FinFunction, v::Vector)
+
+The pullback of f : n → m is the linear map f^* : Rᵐ → Rⁿ defined by
+f^*(y)[i] = y[f(i)].
+"""
+function pullback_function(f::FinFunction, v::Vector)::Vector
+    return [v[f(i)] for i in 1:length(dom(f))]
+end
+
+
+""" pushforward_function(f::FinFunction, v::Vector{Vector{Float64}})
+
+The pushforward of f : n → m is the linear map f_* : Rⁿ → Rᵐ defined by
+f_*(y)[j] = ∑ y[i] for i ∈ f⁻¹(j).
+"""
+function pushforward_function(f::FinFunction, v::Vector{Vector{Float64}})::Vector
+    # Blocks pushed to the same target are summed; they are assumed to have equal length.
+    output = [Float64[] for _ in 1:length(codom(f))]
+    for i in 1:length(dom(f))
+        if isempty(output[f(i)])
+            output[f(i)] = v[i]
+        else
+            output[f(i)] += v[i]
+        end
+    end
+    return output
+end
+
+
+""" pushforward_function(f::FinFunction, v::Vector{Float64})
+
+The pushforward of f : n → m is the linear map f_* : Rⁿ → Rᵐ defined by
+f_*(y)[j] = ∑ y[i] for i ∈ f⁻¹(j).
+"""
+function pushforward_function(f::FinFunction, v::Vector{Float64})::Vector
+    output = [0.0 for _ in 1:length(codom(f))]
+
+    for i in 1:length(dom(f))
+        output[f(i)] += v[i]
+    end
+
+    return output
+end
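+
+# Illustrative sketch (added example): for ϕ = FinFunction([1,1,2], 2),
+# pullback_function(ϕ, [10.0, 20.0]) == [10.0, 10.0, 20.0], while
+# pushforward_function(ϕ, [1.0, 2.0, 3.0]) == [3.0, 3.0]: the pullback copies the
+# value at ϕ(i) into position i, and the pushforward sums over each fiber ϕ⁻¹(j).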
+
+
+end # module
\ No newline at end of file
diff --git a/test/Objectives.jl b/test/Objectives.jl
index 615e236..5ff923e 100644
--- a/test/Objectives.jl
+++ b/test/Objectives.jl
@@ -1,8 +1,9 @@
 using AlgebraicOptimization
 using Test
 using Catlab
+using ComponentArrays
 
-# Test naturality of gradient descent
+# Test naturality of gradient descent: scalar variables
 d = @relation (x,y,z) begin
     f(w,x)
     g(u,w,y)
@@ -35,23 +36,107 @@ composite_prob = oapply(d, [p1,p2,p3])
 
 optimizer_of_composite = gradient_flow(composite_prob)
 
-o1 = gradient_flow(p1)
-o2 = gradient_flow(p2)
-o3 = gradient_flow(p3)
+o1 = Euler(gradient_flow(p1), 0.1)
+o2 = Euler(gradient_flow(p2), 0.1)
+o3 = Euler(gradient_flow(p3), 0.1)
 
-composite_of_optimizers = oapply(OpenContinuousOpt(), d, [o1,o2,o3])
+composite_of_optimizers = oapply(OpenDiscreteOpt(), d, [o1,o2,o3])
 
 dc1 = Euler(optimizer_of_composite, 0.1)
-dc2 = Euler(composite_of_optimizers, 0.1)
+dc2 = composite_of_optimizers
 
 x0 = repeat([100.0], length(composite_prob.S))
 tsteps = 1000
 
 r1 = simulate(dc1, x0, tsteps)
 r2 = simulate(dc2, x0, tsteps)
 
-#println(r1)
-#println(r2)
+@test r1 ≈ r2
+
+
+# Test ComponentArray version of input/output on scalar variables
+# Note that all variables must be exposed to use this i/o system
+d = @relation () begin
+    f(a, b, c, d, e)
+    g(f, g, a)
+    h(b, a, f, h)
+end
+
+P = [2.1154 -0.3038 0.368 -1.5728 -1.203
+    -0.3038 1.5697 1.0226 0.159 -0.946
+    0.368 1.0226 1.847 -0.4916 -1.2668
+    -1.5728 0.159 -0.4916 2.2192 1.5315
+    -1.203 -0.946 -1.2668 1.5315 1.9281]
+Q = [0.2456 0.3564 -0.0088
+    0.3564 0.5912 -0.0914
+    -0.0088 -0.0914 0.8774]
+R = [2.0546 -1.333 -0.5263 0.3189
+    -1.333 1.0481 -0.0211 0.2462
+    -0.5263 -0.0211 0.951 -0.7813
+    0.3189 0.2462 -0.7813 1.5813]
+
+a = [-0.26, 0.22, 0.09, 0.19, -0.96]
+b = [-0.72, 0.12, 0.41]
+c = [0.55, 0.51, 0.6, -0.61]
+
+p1 = Open{PrimalObjective}(FinSet(5), PrimalObjective(FinSet(5),x->x'*P*x + a'*x))
+p2 = Open{PrimalObjective}(FinSet(3), PrimalObjective(FinSet(3),x->x'*Q*x + b'*x))
+p3 = Open{PrimalObjective}(FinSet(4), PrimalObjective(FinSet(4),x->x'*R*x + c'*x))
+
+composite_prob = oapply(d, [p1,p2,p3])
+
+optimizer_of_composite = gradient_flow(composite_prob)
+
+o1 = Euler(gradient_flow(p1), 0.1)
+o2 = Euler(gradient_flow(p2), 0.1)
+o3 = Euler(gradient_flow(p3), 0.1)
+
+composite_of_optimizers = oapply(OpenDiscreteOpt(), d, [o1,o2,o3])
+
+dc1 = Euler(optimizer_of_composite, 0.1)
+dc2 = composite_of_optimizers
+
+
+x2 = ComponentArray(a=11, b=22, c=33, d=44, e=55, f=66, g=77, h=88, i=99)
+tsteps = 1000
+r1 = simulate(dc1, d, x2, tsteps)
+r2 = simulate(dc2, d, x2, tsteps)
 
 @test r1 ≈ r2
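+
+# Note on the example above (added comment): simulate returns a ComponentArray keyed
+# by the UWD's variable names, so e.g. r1.a holds the final value of variable a; the
+# extra key i in x2 is not a variable of d and is ignored.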
+
+# Test naturality of gradient descent: vector variables
+d = @relation (x,y,z) begin
+    f(w,x)
+    g(u,w,y)
+    h(u,w,z)
+end
+
+f1 = x -> x[1][3] + x[4][1] - x[3][2] / x[2][1]
+f2 = x -> 22 * x[2][2] / x[3][1]
+f3 = x -> sum(sum(v) for v in x)
+
+p1 = Open{PrimalObjective}(FinSet(5), PrimalObjective(FinSet(5), f1), FinFunction([2,4], 5))
+p2 = Open{PrimalObjective}(FinSet(3), PrimalObjective(FinSet(3), f2), id(FinSet(3)))
+p3 = Open{PrimalObjective}(FinSet(4), PrimalObjective(FinSet(4), f3), FinFunction([1,3,4]))
+
+composite_prob = oapply(d, [p1,p2,p3])
+
+optimizer_of_composite = gradient_flow(composite_prob)
+
+o1 = Euler(gradient_flow(p1), 0.1)
+o2 = Euler(gradient_flow(p2), 0.1)
+o3 = Euler(gradient_flow(p3), 0.1)
+
+composite_of_optimizers = oapply(OpenDiscreteOpt(), d, [o1,o2,o3])
+
+dc1 = Euler(optimizer_of_composite, 0.1)
+dc2 = composite_of_optimizers
+
+x1::Vector{Vector{Float64}} = [[1, 1, 2], [2, 2], [3, 3], [4, 4], [1, 1], [2], [3], [4, 40, 40], [5]]
+
+
+tsteps = 1000
+r1 = simulate(dc1, x1, tsteps)
+r2 = simulate(dc2, x1, tsteps)
+
+@test r1 ≈ r2
\ No newline at end of file
diff --git a/test/OpenFlowGraphs.jl b/test/OpenFlowGraphs.jl
index 6c5c252..c1c8662 100644
--- a/test/OpenFlowGraphs.jl
+++ b/test/OpenFlowGraphs.jl
@@ -1,8 +1,13 @@
 using Test
 using AlgebraicOptimization
+using Catlab
 
-
+d = @relation (x,y,z) begin
+    f(w,x)
+    g(u,w,y)
+    h(u,w,z)
+end
 
 g = random_open_flowgraph(10, .2, 3)
 A = node_incidence_matrix(data(g))
diff --git a/test/Optimizers.jl b/test/Optimizers.jl
index f9e6c57..1780ccc 100644
--- a/test/Optimizers.jl
+++ b/test/Optimizers.jl
@@ -2,8 +2,8 @@
 using AlgebraicOptimization
 using Catlab
 using Test
+using ComponentArrays
 
-# Test naturality of Euler
-d = @relation (x,y,z) begin
+# Test naturality of Euler with scalar functions
+d = @relation (x,y,z,u,w) begin
     f(w,x)
     g(u,w,y)
     h(u,w,z)
@@ -28,14 +28,80 @@ composite_of_discretizations = oapply(OpenDiscreteOpt(), d, [do1,do2,do3])
 
 x0 = repeat([1.0], length(composite_opt.S))
+
 tsteps = 100
 
 r1 = simulate(discretization_of_composites, x0, tsteps)
 r2 = simulate(composite_of_discretizations, x0, tsteps)
 
-#println(r1)
-#println(r2)
+@test r1 ≈ r2
+
+
+
+# Test naturality of Euler with vector functions
+d = @relation (x,y,z,u,w) begin
+    f(w,x)
+    g(u,w,y)
+    h(u,w,z)
+end
+
+
+mA = v -> [[v[1][1]], [v[2][1], v[1][1]], [v[3][2], v[3][1], v[4][4]], [v[4][4], v[3][1], v[2][1], v[1][1]], [5, 5, 5, 5, 10]]
+mB = v -> [[6, 6, 6, 6, 6, 6], [2, 2], [7, 7, 7, 7, 7, 7, 7]]
+mC = v -> v
+
+o1 = Open{Optimizer}(FinSet(5), mA, FinFunction([2,4], 5))
+o2 = Open{Optimizer}(FinSet(3), mB, id(FinSet(3)))
+o3 = Open{Optimizer}(FinSet(4), mC, FinFunction([1,3,4]))
+
+composite_opt = oapply(OpenContinuousOpt(), d, [o1,o2,o3])
+
+discretization_of_composites = Euler(composite_opt, 0.01)
+
+do1 = Euler(o1, 0.01)
+do2 = Euler(o2, 0.01)
+do3 = Euler(o3, 0.01)
+
+
+composite_of_discretizations = oapply(OpenDiscreteOpt(), d, [do1,do2,do3])
+
+x1::Vector{Vector{Float64}} = [[1], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8, 9]]
+tsteps = 100
+r1 = simulate(discretization_of_composites, x1, tsteps)
+r2 = simulate(composite_of_discretizations, x1, tsteps)
 
 @test r1 ≈ r2
 
+# Test ComponentArray version of input/output
+
+d = @relation () begin
+    f(a, b, c, d, e)
+    g(f, g, a)
+    h(b, a, f, h)
+end
+
+A = rand(-1:0.01:1,5,5)
+B = rand(-1:0.01:1,3,3)
+C = rand(-1:0.01:1,4,4)
+
+o1 = Open{Optimizer}(FinSet(5), x->A*x)
+o2 = Open{Optimizer}(FinSet(3), x->B*x)
+o3 = Open{Optimizer}(FinSet(4), x->C*x)
+
+composite_opt = oapply(OpenContinuousOpt(), d, [o1,o2,o3])
+
+discretization_of_composites = Euler(composite_opt, 0.01)
+
+do1 = Euler(o1, 0.01)
+do2 = Euler(o2, 0.01)
+do3 = Euler(o3, 0.01)
+
+composite_of_discretizations = oapply(OpenDiscreteOpt(), d, [do1,do2,do3])
+
+x2 = ComponentArray(a=11, b=22, c=33, d=44, e=55, f=66, g=77, h=88, i=99)
+
+tsteps = 100
+r1 = simulate(discretization_of_composites, d, x2, tsteps)
+r2 = simulate(composite_of_discretizations, d, x2, tsteps)
+@test r1 ≈ r2
\ No newline at end of file
diff --git a/test/Project.toml b/test/Project.toml
index 99e2389..d5a8a2b 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,6 +1,7 @@
 [deps]
-AlgebraicOptimization = "a72ceada-00ec-4ad9-90d3-37b40eaed052"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 AlgebraicDynamics = "5fd6ff03-a254-427e-8840-ba658f502e32"
+AlgebraicOptimization = "a72ceada-00ec-4ad9-90d3-37b40eaed052"
 Catlab = "134e5e36-593f-5add-ad60-77f754baafbe"
+ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"