From 3cfc26f1399ce49ee61aa00514b87acec131cc62 Mon Sep 17 00:00:00 2001 From: jeremie Date: Sat, 15 Oct 2022 15:24:42 -0400 Subject: [PATCH 01/11] docs update --- experiments/random.jl | 14 +++-- src/fit.jl | 123 +++++++++++++++++++++++++++++++----------- src/gpu/fit_gpu.jl | 4 +- 3 files changed, 105 insertions(+), 36 deletions(-) diff --git a/experiments/random.jl b/experiments/random.jl index 4aad5bfd..c924e37e 100644 --- a/experiments/random.jl +++ b/experiments/random.jl @@ -37,7 +37,7 @@ params1 = EvoTreeRegressor(T=Float32, @btime model = fit_evotree($params1; $x_train, $y_train); @time pred_train = predict(model, x_train); @btime pred_train = predict(model, x_train); -gain = importance(model, 1:100) +gain = importance(model) @time model, cache = EvoTrees.init_evotree(params1, x_train, y_train); @time EvoTrees.grow_evotree!(model, cache); @@ -77,18 +77,24 @@ params1 = EvoTreeGaussian(T=Float32, # train model params1 = EvoTreeRegressor(T=Float32, loss=:linear, metric=:mse, - nrounds=100, + nrounds=10, lambda=1.0, gamma=0, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=0.5, nbins=64, device="gpu") # Asus laptop: 10.015568 seconds (13.80 M allocations: 1.844 GiB, 4.00% gc time) -@time model = EvoTrees.fit_evotree(params1, X_train, Y_train); -@btime model = EvoTrees.fit_evotree(params1, X_train, Y_train); +@time model = EvoTrees.fit_evotree(params1; x_train, y_train); +@btime model = EvoTrees.fit_evotree(params1; x_train, y_train); @time model, cache = EvoTrees.init_evotree_gpu(params1, X_train, Y_train); @time EvoTrees.grow_evotree!(model, cache); +using MLJBase +mach1 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=true) +mach2 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=false) +mach3 = machine(EvoTreeRegressor(loss=:linear, device="gpu", max_depth=5, eta=0.01, nrounds=10), x_train, y_train, cache=false) +fit!(mach1) + # X_train_32 = Float32.(X_train) @time pred_train = EvoTrees.predict(model, X_train); @btime pred_train = EvoTrees.predict(model, X_train); diff --git a/src/fit.jl b/src/fit.jl index 762ef05f..c98ce438 100644 --- a/src/fit.jl +++ b/src/fit.jl @@ -3,7 +3,14 @@ Initialise EvoTree """ -function init_evotree(params::EvoTypes{L,T,S}, X::AbstractMatrix, Y::AbstractVector, W=nothing, offset=nothing; fnames=nothing) where {L,T,S} +function init_evotree( + params::EvoTypes{L,T,S}, + X::AbstractMatrix, + Y::AbstractVector, + W = nothing, + offset = nothing; + fnames = nothing, +) where {L,T,S} K = 1 levels = nothing @@ -25,7 +32,7 @@ function init_evotree(params::EvoTypes{L,T,S}, X::AbstractMatrix, Y::AbstractVec Y = UInt32.(CategoricalArrays.levelcode.(Y)) else levels = sort(unique(Y)) - yc = CategoricalVector(Y, levels=levels) + yc = CategoricalVector(Y, levels = levels) K = length(levels) ΞΌ = zeros(T, K) Y = UInt32.(CategoricalArrays.levelcode.(yc)) @@ -85,16 +92,24 @@ function init_evotree(params::EvoTypes{L,T,S}, X::AbstractMatrix, Y::AbstractVec monotone_constraints[k] = v end - cache = (params=deepcopy(params), - X=X, Y=Y, K=K, - nodes=nodes, - pred=pred, - 𝑖_=𝑖_, 𝑗_=𝑗_, 𝑗=𝑗, - out=out, left=left, right=right, - δ𝑀=δ𝑀, - edges=edges, - X_bin=X_bin, - monotone_constraints=monotone_constraints) + cache = ( + params = deepcopy(params), + X = X, + Y = Y, + K = K, + nodes = nodes, + pred = pred, + 𝑖_ = 𝑖_, + 𝑗_ = 𝑗_, + 𝑗 = 𝑗, + out = out, + left = left, + right = right, + δ𝑀 = δ𝑀, + edges = edges, + X_bin = X_bin, + 
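+        # per-feature monotone constraints (-1: decreasing, 0: none, 1: increasing)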
+        monotone_constraints = monotone_constraints,
+    )

     cache.params.nrounds = 0

@@ -111,14 +126,27 @@ function grow_evotree!(evotree::GBTree{L,T,S}, cache) where {L,T,S}
     # loop over nrounds
     for i = 1:Ξ΄nrounds
         # select random rows and cols
-        sample!(params.rng, cache.𝑖_, cache.nodes[1].𝑖, replace=false, ordered=true)
-        sample!(params.rng, cache.𝑗_, cache.𝑗, replace=false, ordered=true)
+        sample!(params.rng, cache.𝑖_, cache.nodes[1].𝑖, replace = false, ordered = true)
+        sample!(params.rng, cache.𝑗_, cache.𝑗, replace = false, ordered = true)

         # build a new tree
-        update_grads!(L, cache.δ𝑀, cache.pred, cache.Y; alpha=params.alpha)
+        update_grads!(L, cache.δ𝑀, cache.pred, cache.Y; alpha = params.alpha)
         # assign a root and grow tree
         tree = Tree{L,T}(params.max_depth, evotree.K, zero(T))
-        grow_tree!(tree, cache.nodes, params, cache.δ𝑀, cache.edges, cache.𝑗, cache.out, cache.left, cache.right, cache.X_bin, cache.K, cache.monotone_constraints)
+        grow_tree!(
+            tree,
+            cache.nodes,
+            params,
+            cache.δ𝑀,
+            cache.edges,
+            cache.𝑗,
+            cache.out,
+            cache.left,
+            cache.right,
+            cache.X_bin,
+            cache.K,
+            cache.monotone_constraints,
+        )
         push!(evotree.trees, tree)
         predict!(cache.pred, tree, cache.X, cache.K)

@@ -134,8 +162,14 @@ function grow_tree!(
     params::EvoTypes{L,T,S},
     δ𝑀::Matrix{T},
     edges,
-    𝑗, out, left, right,
-    X_bin::AbstractMatrix, K, monotone_constraints) where {L,T,S}
+    𝑗,
+    out,
+    left,
+    right,
+    X_bin::AbstractMatrix,
+    K,
+    monotone_constraints,
+) where {L,T,S}

     # reset nodes
     @threads for n in eachindex(nodes)
@@ -151,7 +185,7 @@ function grow_tree!(
     depth = 1

     # initialize summary stats
-    nodes[1].βˆ‘ .= vec(sum(δ𝑀[:, nodes[1].𝑖], dims=2))
+    nodes[1].βˆ‘ .= vec(sum(δ𝑀[:, nodes[1].𝑖], dims = 2))
     nodes[1].gain = get_gain(L, nodes[1].βˆ‘, params.lambda, K)
     # grow while there are remaining active nodes
     while length(n_current) > 0 && depth <= params.max_depth
@@ -191,7 +225,16 @@ function grow_tree!(
                 popfirst!(n_next)
             else
                 # println("typeof(nodes[n].𝑖): ", typeof(nodes[n].𝑖))
-                _left, _right = split_set_threads!(out, left, right, nodes[n].𝑖, X_bin, tree.feat[n], tree.cond_bin[n], offset)
+                _left, _right = split_set_threads!(
+                    out,
+                    left,
+                    right,
+                    nodes[n].𝑖,
+                    X_bin,
+                    tree.feat[n],
+                    tree.cond_bin[n],
+                    offset,
+                )
                 nodes[n<<1].𝑖, nodes[n<<1+1].𝑖 = _left, _right
                 offset += length(nodes[n].𝑖)
                 update_childs_βˆ‘!(L, nodes, n, best[2][1], best[2][2], K)
@@ -241,21 +284,36 @@ Main training function. Performs model fitting given configuration `params`, `x_
 - `y_eval::Vector`: vector of evaluation targets of length `#observations`.
 - `w_eval::Vector`: vector of evaluation weights of length `#observations`. Defaults to `nothing` (assumes a vector of 1s).
 - `offset_eval::VecOrMat`: evaluation data offset. Should match the size of the predictions.
+- `metric`: The evaluation metric that will be tracked on `x_eval`, `y_eval` and optionally `w_eval` / `offset_eval` data.
 - `early_stopping_rounds::Integer`: number of consecutive rounds without metric improvement after which fitting is stopped.
 - `print_every_n`: sets at which frequency logging info should be printed.
 - `verbosity`: set to 1 to print logging info during training.
+- `fnames`: the names of the `x_train` features. If provided, should be a vector of strings whose length `= size(x_train, 2)`.
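+
+A minimal training sketch (hyper-parameter values are illustrative only;
+`x_train` / `y_train` / `x_eval` / `y_eval` follow the argument descriptions above):
+
+    params = EvoTreeRegressor(T=Float32, loss=:linear, nrounds=100, eta=0.1, max_depth=6, nbins=64)
+    model = fit_evotree(params; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=10)
+    pred = EvoTrees.predict(model, x_eval)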
""" -function fit_evotree(params::EvoTypes{L,T,S}; - x_train::AbstractMatrix, y_train::AbstractVector, w_train=nothing, offset_train=nothing, - x_eval=nothing, y_eval=nothing, w_eval=nothing, offset_eval=nothing, - metric=nothing, early_stopping_rounds=9999, print_every_n=9999, verbosity=1, fnames=nothing) where {L,T,S} +function fit_evotree( + params::EvoTypes{L,T,S}; + x_train::AbstractMatrix, + y_train::AbstractVector, + w_train = nothing, + offset_train = nothing, + x_eval = nothing, + y_eval = nothing, + w_eval = nothing, + offset_eval = nothing, + metric = nothing, + early_stopping_rounds = 9999, + print_every_n = 9999, + verbosity = 1, + fnames = nothing, +) where {L,T,S} nrounds_max = params.nrounds params.nrounds = 0 iter_since_best = 0 if params.device == "gpu" - model, cache = init_evotree_gpu(params, x_train, y_train, w_train, offset_train; fnames) + model, cache = + init_evotree_gpu(params, x_train, y_train, w_train, offset_train; fnames) else model, cache = init_evotree(params, x_train, y_train, w_train, offset_train; fnames) end @@ -268,6 +326,7 @@ function fit_evotree(params::EvoTypes{L,T,S}; offset_eval = T.(offset_eval) end + !isnothing(metric) ? metric = Symbol(metric) : nothing if !isnothing(metric) && !isnothing(x_eval) && !isnothing(y_eval) if params.device == "gpu" x_eval = CuArray(T.(x_eval)) @@ -286,9 +345,11 @@ function fit_evotree(params::EvoTypes{L,T,S}; # initialize metric metric_track = Metric() metric_best = Metric() - metric_track.metric = eval_metric(Val{metric}(), p_eval, y_eval, w_eval, params.alpha) - tracker = (iter=[0], metric=[metric_track.metric]) - @info "Initial tracking info" iter = model.params.nrounds metric = metric_track.metric + metric_track.metric = + eval_metric(Val{metric}(), p_eval, y_eval, w_eval, params.alpha) + tracker = (iter = [0], metric = [metric_track.metric]) + @info "Initial tracking info" iter = model.params.nrounds metric = + metric_track.metric end while model.params.nrounds < nrounds_max && iter_since_best < early_stopping_rounds @@ -297,7 +358,8 @@ function fit_evotree(params::EvoTypes{L,T,S}; # callback function if !isnothing(metric) && !isnothing(x_eval) && !isnothing(y_eval) predict!(p_eval, model.trees[model.params.nrounds+1], x_eval, model.K) - metric_track.metric = eval_metric(Val{metric}(), p_eval, y_eval, w_eval, params.alpha) + metric_track.metric = + eval_metric(Val{metric}(), p_eval, y_eval, w_eval, params.alpha) if metric_track.metric < metric_best.metric metric_best.metric = metric_track.metric metric_best.iter = model.params.nrounds @@ -306,7 +368,8 @@ function fit_evotree(params::EvoTypes{L,T,S}; iter_since_best += 1 end if model.params.nrounds % print_every_n == 0 && verbosity > 0 - @info "Tracking info" iter = model.params.nrounds metric = metric_track.metric + @info "Tracking info" iter = model.params.nrounds metric = + metric_track.metric end end # end of callback end diff --git a/src/gpu/fit_gpu.jl b/src/gpu/fit_gpu.jl index da6c33d4..19023421 100644 --- a/src/gpu/fit_gpu.jl +++ b/src/gpu/fit_gpu.jl @@ -1,5 +1,5 @@ function init_evotree_gpu(params::EvoTypes{L,T,S}, - X::AbstractMatrix, Y::AbstractVector, W=nothing, offset=nothing; fnames) where {L,T,S} + X::AbstractMatrix, Y::AbstractVector, W=nothing, offset=nothing; fnames=nothing) where {L,T,S} K = 1 levels = nothing @@ -106,7 +106,7 @@ function grow_evotree!(evotree::GBTreeGPU{L,T,S}, cache) where {L,T,S} predict!(cache.pred, tree, cache.X, cache.K) end # end of nrounds cache.params.nrounds = params.nrounds - # return model, cache + CUDA.reclaim() 
    return evotree
end


From 05ffc625de059e2b101431c3137c3a80e7762fc4 Mon Sep 17 00:00:00 2001
From: jeremie 
Date: Sat, 15 Oct 2022 15:41:24 -0400
Subject: [PATCH 02/11] docs update

---
 Project.toml      |  2 +-
 src/fit.jl        | 15 +++++++++++++--
 src/importance.jl |  1 +
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/Project.toml b/Project.toml
index a4c8527c..93366856 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 authors = ["jeremiedb "]
 name = "EvoTrees"
 uuid = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5"
-version = "0.12.0"
+version = "0.12.1"

 [deps]
 BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
diff --git a/src/fit.jl b/src/fit.jl
index c98ce438..827b8c0c 100644
--- a/src/fit.jl
+++ b/src/fit.jl
@@ -284,11 +284,22 @@ Main training function. Performs model fitting given configuration `params`, `x_
 - `y_eval::Vector`: vector of evaluation targets of length `#observations`.
 - `w_eval::Vector`: vector of evaluation weights of length `#observations`. Defaults to `nothing` (assumes a vector of 1s).
 - `offset_eval::VecOrMat`: evaluation data offset. Should match the size of the predictions.
-- `metric`: The evaluation metric that will be tracked on `x_eval`, `y_eval` and optionally `w_eval` / `offset_eval` data.
+- `metric`: The evaluation metric that will be tracked on `x_eval`, `y_eval` and optionally `w_eval` / `offset_eval` data.
+  Supported metrics are:
+
+    - `:mse`: mean-squared error. Adapted for general regression models.
+    - `:rmse`: root-mean-squared error (CPU only). Adapted for general regression models.
+    - `:mae`: mean absolute error. Adapted for general regression models.
+    - `:logloss`: Adapted for `:logistic` regression models.
+    - `:mlogloss`: Multi-class cross entropy. Adapted to `EvoTreeClassifier` classification models.
+    - `:poisson`: Poisson deviance. Adapted to `EvoTreeCount` count models.
+    - `:gamma`: Gamma deviance. Adapted to regression problems on Gamma-like, positively distributed targets.
+    - `:tweedie`: Tweedie deviance. Adapted to regression problems on Tweedie-like, positively distributed targets with probability mass at `y == 0`.
+
 - `early_stopping_rounds::Integer`: number of consecutive rounds without metric improvement after which fitting is stopped.
 - `print_every_n`: sets at which frequency logging info should be printed.
 - `verbosity`: set to 1 to print logging info during training.
-- `fnames`: the names of the `x_train` features. If provided, should be a vector of strings whose length `= size(x_train, 2)`.
+- `fnames`: the names of the `x_train` features. If provided, should be a vector of strings with `length(fnames) = size(x_train, 2)`.
 """
 function fit_evotree(
     params::EvoTypes{L,T,S};
diff --git a/src/importance.jl b/src/importance.jl
index 4d47152e..e90080dc 100644
--- a/src/importance.jl
+++ b/src/importance.jl
@@ -10,6 +10,7 @@ end
     importance(model::GBTree)

 Sorted normalized feature importance based on loss function gain.
+Feature names associated with the model are stored in `model.info[:fnames]` as a string `Vector` and can be updated at any time, e.g. `model.info[:fnames] = new_fnames_vec`.
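+
+A minimal sketch (assumes a fitted `model`; entries are sorted by normalized gain):
+
+    gain = importance(model)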
""" function importance(model::Union{GBTree,GBTreeGPU}) fnames = model.info[:fnames] From c9ccd2370029a83a82274a708a740dd3e50f77b1 Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Sun, 16 Oct 2022 22:52:43 -0400 Subject: [PATCH 03/11] up --- Project.toml | 2 +- experiments/benchmarks_v2-MLE.jl | 79 ++++++++ experiments/benchmarks_v2.jl | 3 +- experiments/logistic_tests.jl | 142 ++++++++++++++ experiments/readme_plots_cpu.jl | 39 +++- experiments/readme_plots_gpu.jl | 3 +- experiments/speed_cpu_gpu.jl | 24 ++- ...{gaussian_sinus.png => gaussian-sinus.png} | Bin figures/logistic-sinus.png | Bin 0 -> 93334 bytes src/EvoTrees.jl | 2 +- src/MLJ.jl | 184 +++++++++++++++++- src/eval.jl | 9 + src/find_split.jl | 8 +- src/fit.jl | 27 ++- src/gpu/find_split_gpu.jl | 6 +- src/gpu/fit_gpu.jl | 4 +- src/gpu/loss_gpu.jl | 6 +- src/gpu/predict_gpu.jl | 12 +- src/importance.jl | 1 + src/loss.jl | 27 ++- src/models.jl | 91 ++++++++- src/predict.jl | 10 +- test/MLJ.jl | 48 ++++- test/core.jl | 69 +++++-- 24 files changed, 719 insertions(+), 77 deletions(-) create mode 100644 experiments/benchmarks_v2-MLE.jl create mode 100644 experiments/logistic_tests.jl rename figures/{gaussian_sinus.png => gaussian-sinus.png} (100%) create mode 100644 figures/logistic-sinus.png diff --git a/Project.toml b/Project.toml index a4c8527c..93366856 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ authors = ["jeremiedb "] name = "EvoTrees" uuid = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5" -version = "0.12.0" +version = "0.12.1" [deps] BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" diff --git a/experiments/benchmarks_v2-MLE.jl b/experiments/benchmarks_v2-MLE.jl new file mode 100644 index 00000000..1b49ae50 --- /dev/null +++ b/experiments/benchmarks_v2-MLE.jl @@ -0,0 +1,79 @@ +using Revise +using Statistics +using StatsBase: sample +using XGBoost +using EvoTrees +using BenchmarkTools +using CUDA + +nrounds = 200 +nobs = Int(1e6) +num_feat = Int(100) +nthread = Base.Threads.nthreads() + +# EvoTrees params +params_evo = EvoTreeMLE( + T=Float32, + loss=:gaussian, + nrounds=nrounds, + lambda=0.0, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=1.0, + rowsample=0.5, + colsample=0.5, + nbins=64, +) + +@info "testing with: $nobs observations | $num_feat features." 
+x_train = rand(nobs, num_feat) +y_train = rand(size(x_train, 1)) + +@info "evotrees train CPU:" +params_evo.device = "cpu" +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=50); +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian); +@info "evotrees predict CPU:" +@time pred_evo = EvoTrees.predict(m_evo, x_train); +@btime EvoTrees.predict($m_evo, $x_train); + +CUDA.allowscalar(true) +@info "evotrees train GPU:" +params_evo.device = "gpu" +@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=50); +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian); +@info "evotrees predict GPU:" +@time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); +@btime EvoTrees.predict($m_evo_gpu, $x_train); + + +################################ +# Logistic +################################ +params_evo = EvoTreeMLE( + T=Float32, + loss=:logistic, + nrounds=nrounds, + lambda=0.0, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=1.0, + rowsample=0.5, + colsample=0.5, + nbins=64, +) + +@info "testing with: $nobs observations | $num_feat features." +x_train = rand(nobs, num_feat) +y_train = rand(size(x_train, 1)) + +@info "evotrees train CPU:" +params_evo.device = "cpu" +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:logistic, print_every_n=50); +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:logistic); +@info "evotrees predict CPU:" +@time pred_evo = EvoTrees.predict(m_evo, x_train); +@btime EvoTrees.predict($m_evo, $x_train); \ No newline at end of file diff --git a/experiments/benchmarks_v2.jl b/experiments/benchmarks_v2.jl index 42c0e683..5a20465e 100644 --- a/experiments/benchmarks_v2.jl +++ b/experiments/benchmarks_v2.jl @@ -10,7 +10,7 @@ nrounds = 200 nthread = Base.Threads.nthreads() @info nthread -loss = "logistic" +loss = "linear" if loss == "linear" loss_xgb = "reg:squarederror" metric_xgb = "mae" @@ -40,7 +40,6 @@ metrics = [metric_xgb] params_evo = EvoTreeRegressor( T=Float32, loss=loss_evo, - metric=metric_evo, nrounds=nrounds, alpha=0.5, lambda=0.0, diff --git a/experiments/logistic_tests.jl b/experiments/logistic_tests.jl new file mode 100644 index 00000000..d4f507de --- /dev/null +++ b/experiments/logistic_tests.jl @@ -0,0 +1,142 @@ +using Statistics +using StatsBase: sample, sample! 
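+# scratch script: steps through the :logistic MLE loss internals
+# (update_grads!, update_hist!, update_gains!, split_set!) one stage at a time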
+using EvoTrees +using BenchmarkTools +using CUDA + +# prepare a dataset +features = rand(Int(1.25e4), 100) +# features = rand(100, 10) +X = features +Y = rand(size(X, 1)) +𝑖 = collect(1:size(X, 1)) + +# train-eval split +𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) +train_size = 0.8 +𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] +𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] + +x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :] +y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] + + +########################### +# Tree CPU +########################### +params_c = EvoTrees.EvoTreeMLE(T=Float32, + loss=:logistic, + nrounds=200, + lambda=0.1, gamma=0.0, eta=0.1, + max_depth=5, min_weight=1.0, + rowsample=0.5, colsample=0.5, nbins=16); + +model_c, cache_c = EvoTrees.init_evotree(params_c, x_train, y_train); +EvoTrees.grow_evotree!(model_c, cache_c) +p = model_c(x_train) +sort(p[:,1]) +sort(p[:,2]) + +# initialize from cache +params_c = model_c.params +x_size = size(cache_c.X_bin) + +# select random rows and cols +sample!(params_c.rng, cache_c.𝑖_, cache_c.nodes[1].𝑖, replace=false, ordered=true); +sample!(params_c.rng, cache_c.𝑗_, cache_c.𝑗, replace=false, ordered=true); +# @btime sample!(params_c.rng, cache_c.𝑖_, cache_c.nodes[1].𝑖, replace=false, ordered=true); +# @btime sample!(params_c.rng, cache_c.𝑗_, cache_c.𝑗, replace=false, ordered=true); + +𝑖 = cache_c.nodes[1].𝑖 +𝑗 = cache_c.𝑗 + +# build a new tree +# 897.800 ΞΌs (6 allocations: 736 bytes) +get_loss_type(m::EvoTreeGaussian{L,T,S}) where {L,T,S} = L +get_loss_type(m::EvoTrees.EvoTreeLogistic{L,T,S}) where {L,T,S} = L + +L = get_loss_type(params_c) +@time EvoTrees.update_grads!(L, cache_c.δ𝑀, cache_c.pred, cache_c.Y; alpha=params_c.alpha) +cache_c.δ𝑀 + +sort(cache_c.δ𝑀[1, :]) +sort(cache_c.δ𝑀[2, :]) +sort(cache_c.δ𝑀[3, :]) +sort(cache_c.δ𝑀[4, :]) + +p = collect(-3:0.5:3) +y = collect(-3:0.5:3) + +function get_grads(p, y) + grad = zeros(length(p), length(y)) + for i in eachindex(p) + for j in eachindex(y) + # alternate from 1 + # grad[i, j] = -(exp(-2s) * (u - y) * (u - y + exp(s) * sinh(exp(-s) * (u - y)))) / (1 + cosh(exp(-s) * (u - y))) + grad[i, j] = (exp(-2 * p[i]) * (0.0 - y[j]) * (0.0 - y[j] + exp(p[i]) * sinh(exp(-p[i]) * (0.0 - y[j])))) / (1 + cosh(exp(-p[i]) * (0.0 - y[j]))) + end + end + return grad +end + +grads = get_grads(p, y) +heatmap(grads) +# @btime EvoTrees.update_grads!($params_c.loss, $cache_c.δ𝑀, $cache_c.pred_cpu, $cache_c.Y_cpu, $params_c.Ξ±) +# βˆ‘ = vec(sum(cache_c.Ξ΄[𝑖,:], dims=1)) +# gain = EvoTrees.get_gain(params_c.loss, βˆ‘, params_c.Ξ») +# assign a root and grow tree +# train_nodes[1] = EvoTrees.TrainNode(UInt32(0), UInt32(1), βˆ‘, gain) + +# 62.530 ms (7229 allocations: 17.43 MiB) +# 1.25e5: 9.187 ms (7358 allocations: 2.46 MiB) +tree = EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.Ξ»))) +@time EvoTrees.grow_tree!(tree, cache_c.nodes, params_c, cache_c.δ𝑀, cache_c.edges, cache_c.𝑗, cache_c.left, cache_c.left, cache_c.right, cache_c.X_bin, cache_c.K) +@btime EvoTrees.grow_tree!($EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.Ξ»))), $cache_c.nodes, $params_c, $cache_c.δ𝑀, $cache_c.edges, $cache_c.𝑗, $cache_c.left, $cache_c.left, $cache_c.right, $cache_c.X_bin, $cache_c.K) + +@time EvoTrees.grow_tree!(EvoTrees.Tree(params_c.max_depth, model_c.K, params_c.Ξ»), params_c, cache_c.Ξ΄, cache_c.hist, cache_c.histL, cache_c.histR, cache_c.gains, cache_c.edges, 𝑖, 𝑗, 𝑛, cache_c.X_bin); +@btime EvoTrees.grow_tree!(EvoTrees.Tree($params_c.max_depth, $model_c.K, 
$params_c.Ξ»), $params_c, $cache_c.Ξ΄, $cache_c.hist, $cache_c.histL, $cache_c.histR, $cache_c.gains, $cache_c.edges, $𝑖, $𝑗, $𝑛, $cache_c.X_bin); +@code_warntype EvoTrees.grow_tree!(EvoTrees.Tree(params_c.max_depth, model_c.K, params_c.Ξ»), params_c, cache_c.Ξ΄, cache_c.hist, cache_c.histL, cache_c.histR, cache_c.gains, cache_c.edges, 𝑖, 𝑗, 𝑛, cache_c.X_bin); + +# push!(model_c.trees, tree) +# 1.883 ms (83 allocations: 13.77 KiB) +@btime EvoTrees.predict!(model_c.params.loss, cache_c.pred_cpu, tree, cache_c.X, model_c.K) + +δ𝑀, K, edges, X_bin, nodes, out, left, right = cache_c.δ𝑀, cache_c.K, cache_c.edges, cache_c.X_bin, cache_c.nodes, cache_c.out, cache_c.left, cache_c.right; + +# 9.613 ms (81 allocations: 13.55 KiB) +# 1.25e5: 899.200 ΞΌs (81 allocations: 8.22 KiB) +@time EvoTrees.update_hist!(params_c.loss, nodes[1].h, δ𝑀, X_bin, 𝑖, 𝑗, K) +@btime EvoTrees.update_hist!($params_c.loss, $nodes[1].h, $δ𝑀, $X_bin, $𝑖, $𝑗, $K) +@btime EvoTrees.update_hist!($nodes[1].h, $δ𝑀, $X_bin, $nodes[1].𝑖, $𝑗) +@code_warntype EvoTrees.update_hist!(hist, Ξ΄, X_bin, 𝑖, 𝑗, 𝑛) + +j = 1 +# 8.399 ΞΌs (80 allocations: 13.42 KiB) +n = 1 +nodes[1].βˆ‘ .= vec(sum(δ𝑀[:, 𝑖], dims=2)) +EvoTrees.update_gains!(params_c.loss, nodes[n], 𝑗, params_c, K) +nodes[1].gains +# findmax(nodes[1].gains) #1.25e5: 36.500 ΞΌs (81 allocations: 8.22 KiB) +@btime EvoTrees.update_gains!($params_c.loss, $nodes[n], $𝑗, $params_c, $K) +@code_warntype EvoTrees.update_gains!(params_c.loss, nodes[n], 𝑗, params_c, K) + +#1.25e5: 14.100 ΞΌs (1 allocation: 32 bytes) +best = findmax(nodes[n].gains) +@btime best = findmax(nodes[n].gains) +@btime best = findmax(view(nodes[n].gains, :, 𝑗)) + +tree.cond_bin[n] = best[2][1] +tree.feat[n] = best[2][2] + +Int.(tree.cond_bin[n]) +# tree.cond_bin[n] = 32 + +# 204.900 ΞΌs (1 allocation: 96 bytes) +offset = 0 +@time EvoTrees.split_set!(left, right, 𝑖, X_bin, tree.feat[n], tree.cond_bin[n], offset) +@btime EvoTrees.split_set!($left, $right, $𝑖, $X_bin, $tree.feat[n], $tree.cond_bin[n], $offset) +@code_warntype EvoTrees.split_set!(left, right, 𝑖, X_bin, tree.feat[n], tree.cond_bin[n]) + +# 1.25e5: 227.200 ΞΌs (22 allocations: 1.44 KiB) +@time EvoTrees.split_set_threads!(out, left, right, 𝑖, X_bin, tree.feat[n], tree.cond_bin[n], offset) +@btime EvoTrees.split_set_threads!($out, $left, $right, $𝑖, $X_bin, $tree.feat[n], $tree.cond_bin[n], $offset, Int(2e15)) diff --git a/experiments/readme_plots_cpu.jl b/experiments/readme_plots_cpu.jl index de2a4864..f80ae569 100644 --- a/experiments/readme_plots_cpu.jl +++ b/experiments/readme_plots_cpu.jl @@ -200,14 +200,14 @@ savefig("figures/quantiles_sinus.png") ############################### ## gaussian ############################### -params1 = EvoTreeGaussian( - loss=:gaussian, metric=:gaussian, +params1 = EvoTreeMLE( + loss=:gaussian, nrounds=200, nbins=64, lambda=0.1, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, rowsample=1.0, colsample=1.0, rng=123) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10, metric=:gaussian); # @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10); @time pred_train = EvoTrees.predict(model, x_train); # @btime pred_train = EvoTrees.predict(model, X_train); @@ -225,4 +225,35 @@ plot!(x_train[:, 1][x_perm], pred_train[x_perm, 1], color="navy", linewidth=1.5, plot!(x_train[:, 1][x_perm], pred_train[x_perm, 2], color="darkred", linewidth=1.5, label="sigma") plot!(x_train[:, 1][x_perm], 
pred_q20[x_perm, 1], color="darkgreen", linewidth=1.5, label="q20") plot!(x_train[:, 1][x_perm], pred_q80[x_perm, 1], color="darkgreen", linewidth=1.5, label="q80") -savefig("figures/gaussian_sinus.png") \ No newline at end of file +savefig("figures/gaussian-sinus.png") + + +############################### +## Logistic +############################### +params1 = EvoTrees.EvoTreeMLE( + loss = :logistic, + nrounds=200, nbins=64, + lambda=1.0, gamma=0.1, eta=0.05, + max_depth=6, min_weight=1.0, + rowsample=1.0, colsample=1.0, rng=123) + +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=10, metric=:logistic); +# @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10); +@time pred_train = EvoTrees.predict(model, x_train); +# @btime pred_train = EvoTrees.predict(model, X_train); + +pred_logistic = [Distributions.Logistic(pred_train[i, 1], pred_train[i, 2]) for i in axes(pred_train, 1)] +pred_q80 = quantile.(pred_logistic, 0.8) +pred_q20 = quantile.(pred_logistic, 0.2) + +mean(y_train .< pred_q80) +mean(y_train .< pred_q20) + +x_perm = sortperm(x_train[:, 1]) +plot(x_train[:, 1], y_train, ms=0.5, mcolor="darkgray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") +plot!(x_train[:, 1][x_perm], pred_train[x_perm, 1], color="navy", linewidth=1.5, label="mu") +plot!(x_train[:, 1][x_perm], pred_train[x_perm, 2], color="darkred", linewidth=1.5, label="s") +plot!(x_train[:, 1][x_perm], pred_q20[x_perm, 1], color="darkgreen", linewidth=1.5, label="q20") +plot!(x_train[:, 1][x_perm], pred_q80[x_perm, 1], color="darkgreen", linewidth=1.5, label="q80") +savefig("figures/logistic-sinus.png") \ No newline at end of file diff --git a/experiments/readme_plots_gpu.jl b/experiments/readme_plots_gpu.jl index 20ba04ff..cbe94b19 100644 --- a/experiments/readme_plots_gpu.jl +++ b/experiments/readme_plots_gpu.jl @@ -115,7 +115,6 @@ savefig("figures/regression_sinus_gpu.png") ############################### EvoTrees.CUDA.allowscalar(false) params1 = EvoTreeGaussian(T=Float32, - loss=:gaussian, metric=:gaussian, nrounds=200, nbins=64, lambda=1.0, gamma=0.1, eta=0.05, max_depth=6, min_weight=5, @@ -140,4 +139,4 @@ plot!(x_train[:, 1][x_perm], pred_train_gaussian[x_perm, 1], color="navy", linew plot!(x_train[:, 1][x_perm], pred_train_gaussian[x_perm, 2], color="darkred", linewidth=1.5, label="sigma") plot!(x_train[:, 1][x_perm], pred_q20[x_perm, 1], color="green", linewidth=1.5, label="q20") plot!(x_train[:, 1][x_perm], pred_q80[x_perm, 1], color="green", linewidth=1.5, label="q80") -savefig("figures/gaussian_sinus_gpu.png") \ No newline at end of file +savefig("figures/gaussian-sinus-gpu.png") \ No newline at end of file diff --git a/experiments/speed_cpu_gpu.jl b/experiments/speed_cpu_gpu.jl index 91f2c657..27afbbc9 100644 --- a/experiments/speed_cpu_gpu.jl +++ b/experiments/speed_cpu_gpu.jl @@ -1,5 +1,5 @@ using Statistics -using StatsBase:sample, sample! +using StatsBase: sample, sample! 
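+# stage-by-stage timings of each tree-growth step on CPU and GPU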
 using EvoTrees
 using BenchmarkTools
 using CUDA
@@ -15,19 +15,26 @@ Y = rand(size(X, 1))
 𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
 train_size = 0.8
 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
-𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1)) + 1:end]
+𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]

-X_train, X_eval = X[𝑖_train, :], X[𝑖_eval, :]
-Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]
+x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :]
+y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]

 ###########################
 # Tree CPU
 ###########################
 params_c = EvoTreeRegressor(T=Float32,
-    loss=:linear, metric=:none,
+    loss=:linear,
     nrounds=100,
-    Ξ»=1.0, Ξ³=0.1, Ξ·=0.1,
+    lambda=1.0, gamma=0.0, eta=0.1,
+    max_depth=6, min_weight=1.0,
+    rowsample=0.5, colsample=0.5, nbins=64);
+
+params_c = EvoTrees.EvoTreeLogistic(T=Float32,
+    loss=:linear,
+    nrounds=100,
+    lambda=1.0, gamma=0.0, eta=0.1,
     max_depth=6, min_weight=1.0,
     rowsample=0.5, colsample=0.5, nbins=64);

 # params_c = EvoTreeGaussian(T=Float32,
 #     loss=:gaussian, metric=:gaussian,
 #     nrounds=100,
 #     Ξ»=1.0, Ξ³=0.1, Ξ·=0.1,
 #     max_depth=6, min_weight=1.0,
 #     rowsample=0.5, colsample=0.5, nbins=64);

-model_c, cache_c = EvoTrees.init_evotree(params_c, X_train, Y_train);
+model_c, cache_c = EvoTrees.init_evotree(params_c, x_train, y_train);

 # initialize from cache
 params_c = model_c.params
@@ -55,7 +62,6 @@ sample!(params_c.rng, cache_c.𝑗_, cache_c.𝑗, replace=false, ordered=true);

 # build a new tree
 # 897.800 ΞΌs (6 allocations: 736 bytes)
-@time EvoTrees.update_grads!(params_c.loss, cache_c.δ𝑀, cache_c.pred, cache_c.Y, params_c.Ξ±)
 # @btime EvoTrees.update_grads!($params_c.loss, $cache_c.δ𝑀, $cache_c.pred_cpu, $cache_c.Y_cpu, $params_c.Ξ±)
 # βˆ‘ = vec(sum(cache_c.Ξ΄[𝑖,:], dims=1))
 # gain = EvoTrees.get_gain(params_c.loss, βˆ‘, params_c.Ξ»)
 # assign a root and grow tree
 # train_nodes[1] = EvoTrees.TrainNode(UInt32(0), UInt32(1), βˆ‘, gain)

 # 62.530 ms (7229 allocations: 17.43 MiB)
 # 1.25e5: 9.187 ms (7358 allocations: 2.46 MiB)
-tree = EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.Ξ»)))
+tree = EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.lambda)))
 @time EvoTrees.grow_tree!(tree, cache_c.nodes, params_c, cache_c.δ𝑀, cache_c.edges, cache_c.𝑗, cache_c.left, cache_c.left, cache_c.right, cache_c.X_bin, cache_c.K)
 @btime EvoTrees.grow_tree!($EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.Ξ»))), $cache_c.nodes, $params_c, $cache_c.δ𝑀, $cache_c.edges, $cache_c.𝑗, $cache_c.left, $cache_c.left, $cache_c.right, $cache_c.X_bin, $cache_c.K)
diff --git a/figures/gaussian_sinus.png b/figures/gaussian-sinus.png
similarity index 100%
rename from figures/gaussian_sinus.png
rename to figures/gaussian-sinus.png
diff --git a/figures/logistic-sinus.png b/figures/logistic-sinus.png
new file mode 100644
index 0000000000000000000000000000000000000000..b1f511dcd4ce3be1c1c64fb9bb862c9042c05dfc
GIT binary patch
literal 93334
[93,334 bytes of base85-encoded PNG data omitted]
zWkq^O$!6DqU==7uuM~6YpT)I|BnN3B%0K%_al}jK{qhhO5Dw+};kZ4bR%$EMj7x_>|x8;Q14pAGLB-FF9SEcoP>T7(1twUZQeQ2Vqb~ zy>pQ1e3N1gZAFI(zw9z~1PPT3dFZ@?TBL%O)CZnSnUp z5F#k#^&dXz$Zeot*#X6YmKq~dDQDD!mUZJuzdG^|@T|6(AX4s|8~hIvq*r8kkTO6^ z^F^!WAy!tnbP>+mQgYHj(Q)~BUO!+~eY(0ieI>=``-Kid$ysAVbr?3s)gPDMI|X|+ z;)zI)E2MroLfuP*p zkr7i96YNQ#=^sAo2K@~VJeYo}$o9H>y_3lM#{pFhn zOL}_3&ty*v(ZlB4+0&Den5h57tJ!vv^iEf>_QxAD%~Q)W3Agb=|M^4eo-Fir0OD%B60KWzWeRy>KW|4u|Xf)wbUAwBcgh~$8uP-g;;kTN z`54uPy^TlV|K{KV$f#-5(P^UmA;Dj$`36aC=`lZbuDPdqhbf`+bk!&Q`((S@7D=x9 zJUrrAGd9;_wxLlQ=I6i{2rONg*AuHs+Yox>K&l*WdZcqGs0AGuV5?{#S+4C4YC#~x zi4$w^iO3@1$y8>{(iMQic3j`N1_1@bLUYAaA)^Zr(p)VZ%uMGFY?`)3`%xXx zddq*124(ER=Sn0CA;XaU)F_@S%)G;d7whQ`p^9N*N=~jGdogb><=X`8W?E8`V(CAt zHU8;`_?mFxW@)%$wA@()to#y9cYAy0MEz)~(4)(`Owu?BQ08d{J`^*$1qE8q6u>x7 zs)K|m3%O*0=RjZ&>(&)U0_Y}~KRe#`Tu57}b6%|=O}~F{3#!^2_dW3mn!u+0eob&z z@hQ>-U^k%G3r~TD#?4P9KIv)LbOyTlZg)xaOiWGvT>VQ-q_i1FRwtaL)^n84x%osy zMyu}%erJ7!`Hn1WY!^8Jb8Z~b2KxXOI@_T~95A<9yVi}2jafiYykv~#1hRPo0kYM1 z!eZUS*t}mYZpB zkMKc6>9IuMV#B-fQah(l%hC&T3s`|ALL-Svq!j>sfl9dE9VxkU?TH+B71o_fYR?VDg6T^5;_);Nr z<>^B;hr9t8$6{i*=tC_AivfYWUcu(QDfZ>sM?3L50Xp;D8D(h@yTg?svBN6A% zp|F6)@ukLaK_#^WH`f`=P@|M@;O~6$I7-6?UBA7x(SPZ;`d%_+ zoFZV5!>Wzu_PptSRvBl6Kpj;%^4(ecQm)tLB9Q50oz~Wr1I0Z)W!ZR8zB?YIqjRk@ z{q!!N_Q)Wmbeg7LQ>Lx}03-&~7E&w_AmzrMNbH`h3NbmF3e#;tF+>g3!}HHMjO7}8 z*^T!@3|(y2kS<1+uUB9(cd{^tXJEhB$eyPK#{}a3d@B8!oRM)!dgxyLli>*Y-}-8U zrsYiPU{$824S)KiDrOG-F@_vAnr?z^hTOD+FK!u$3!;ZgOiAf{Ql2sSqQ#u~rSW_v zGPj681$YGBt`_S3+`u9OOdzkh8)il!F=mdpH?j;ZCr+;R%vUK>f?nBcW8=6yf@pRXTXPW^beh|JAA({RpPGlApru$ z3qBr=c)vI8?e{NSDP`2SS`S1octtb(nG<7cV?k_C4oSpMD;+6^X99ET0CN1`B#zm+ zxw=DC;#pc!1qU_mB$VPz@M~(cq}X>@dL^&Zn}k5UXWx4H=Iq7l)7rPu6)^})HLMnq z5t!OP4;1y^b@Dtwz>$cA@PR@DWipiL#tjxgGRx*E|L4Wk!& z{I0awNGf%FlvLkqtqcR?sMp3E0A(BSvzHFu#F>EMfYQ?%^#qjEacTi7yJCiUwLD>_ zg!}~t@_5s$U>m4@IB|C0pahSayOJjhC)VJBNHOYh z2Uo)V+0O2x&%bm|L*y$i;i1`FKlLr65X2}Wza(gx1c>+CK`7xtTb8)5R{ zJiT7yhY#xgDOu=RKq!9s^3!+6Qbr}KUEyc0KuCmZsF#LkYUmM(#|C6+K33RNxqvMx0SF zv{B6}Bu(ly%f_4ANv0WPd@p?o*MKiO+BxLvkO0Bk*(t#kuC-5e&<+yp0C{y(ti(7} zjC4G{MrQs`3n0m4u=l6ER68v#BnnzIv~7#HYF}4kzu4DVh29t{MC*U_w(-K4cy^HJ zhH@_MyzK9EF^XEPB10$t(NB%5*RHjVJ0z-bC3G4{-h~STK)V1XVAbFW!$vtb4n$w1 zklIM91?%_Zzl`^DxK4TfINhyJTF*IBQqqoe`9)ry!n?!$Km`dM@FD&nCnXeOs@F#e z%AgW&Op~{owss!`Z+JaUEmjm29W^kqPX0`_90K9YJ4=h{3xaf%XpbSgAOcN}_J!a^ z*VomJ?b->a?bqkXQ2r)ms;i5P;f`CH9v&VYw{A3s@9HGn2h_H@jFZra&{ytg5P85R ze5-{U*?y8@*V3V$5baBFu0C#AP(PelkfcUp=;0yL`ULL`!r>p%$CY?~8=sH7BQ9@7 z*yX1f)|fuDNM~RNXY)1ed7&cu?ij^zVM5|I9(6vx_Kpr9r!Zi2iu$X-E_&=(0Cg~^ z;WJJoO;4a+S$~3q9;SVHdA2qOs6l z@fhk-zT2cfAu<{(PeZ) z_}=#BtS6n)P67sLU<4$7NEOlm4P@uS5^gT@FUZB}3n)Ux7t|7Pl^FmK`(|8URYgXX zZ@w=fWas@a#ax{t)4J+{f>6s{Fh1!E1!}$r%?01_Z7yX^>ABfyQVn-$Dvr}4e4)== z@H~}s)I!wE6)C&)(dc%T(Hvkk#zsaz*ESI9J&aFp0#+c`&r`a816oL&Xz1#0 zBHk?9WB1mtAbI0zr`>Yc*+2|6`gI&q69zL+2TxB=a4Y{a?4NK$L?a?%7@O!RNNu7Y zB+nzQ1oa;NpiI1vFn)k84cwZ~w}>T!T$aqR$KUm}jBA@O`3K%#VEwn7kO@=Vqem*d z@m0Rh?1j2sE*mFZToF5nlhtrwjy|TEhPv?a5ekxkBRUI1{qNp=bFLwB(%$?g*OL=n ze`6)&;K%lD=vPJFh?f9eouc(F)0;S1PT!G-Le*0236t^#FZ9i@wGLL^X3Oo_d6Q-z z8`qHoPX}|kr$M!Plg3RSiZmQzvH8pNIh4N0M0o9W9eT=zHK<}2Ub>J=?V)`&8z@Q} z{D{H6lQd9OnVrUU``;ST`W?-{;wiT+GrGPv76#IUhC(eqxYi6JHsXpsdFVrlL88)W z^8h~Qk~6>_h&cnq2Z%nrz`0-vhYgCBr%&BC<|>QyeoFMnmiI2WaA6~bb*-i-6(uE3 z2fA2fw*_AJhtPDm`tB2|I0be%1B3(we0gIWbQ|}7Sh0**J~p*MK6;|=x?}yMQ~jin zcEaZT=0aJ0O5EyjVbI9ECqJex{aR#B7|pa{8Rftus{OIec3u(|z&EGPox#0@wITC@I+P{bAP!b73E4BDW?iPXQ~nl?k&N=WwA!}W zn3$Po$j264WBtYDGA_LRh#1tc!E^!vrF}Rb%e}yJJMXcn{b+=sBj#S3q{|^{K_U)K zm3#V)2S(8|PR(uji98P0R#p?=S`p55NjPB}(efW>oKVi@UN!^vj~Ep>aTG-_zDN0u 
z!5#tuQJ~vMfkAR<`GZ@H)6W{v^Q39I0Cfm_kXr6LUtf~gXL}S)X81KUH8I22HERvN z6HEL30C|Q&L09EgMrHDm47gMRkRpap4ibre)sWQ_fOnd{bh4}Y2-(@PrlAfGwS$Y8{E7mTeWgsSe*MaBAo94P0!1u0b15AP zTo?_mQ^i@I{DqBtcl|}$uacTrF9s-4x9AVGY7HG%{9`Of_^P@QrTvrdojD1?$!vCO z^6l-%(_IfcW8NI$K*EnuCl`e;XAal`VEwNrW78#g^k{girOgS}i|{uI zcfPKnt}b1((tUm);wdlAu)E&4Pf}F5Ar!x#I`Co3Snbx|pU}k!02~GBx?vFl_TZwS z3synZIJ$!X)i#(?R<zIL|3KZZ{hUxcy4^Cxxvq}%LdHu2Ytuv3#1>VPWPzl_BZQ_dgrFiSs z9FTR}TMOIuf4+r6mShz(v%DVdcK4NK>6x69-dnOmz*HMl6X1=;u^nsH`}uRt^$$-` zt&NPBzNyI^L*YOB@9zFyPKZC29L(zpLy7LDJCjh)O0~WiMr9htbbA>^fYZ(q-Is&% zy4Vlo%w9$HdhqsWK3*<8Fg<0|VpX(;vU%QSd% ztnv}OjWG&BU9AFXXenC*1LcYtV7teF1jK#VYq@AmyctqucxiV-cX;%~X|~kDV2tQ6 zd|lV&ifTud{xvecU6(*`^<@2G0cpdRlwgfJcb4ncM$~(#iOPNu&rsVyUZ}#ERBYhAy z<74Jg5K|Eiv$9BSk~1*_{V)(E5ESyKGjsVUG)^t|4dFwCou%a(8XyR{oVkW(`+=dOziVr2kL;D(VHUkR>9#Zk1X*+v5223={BW4e&xiAQ zxR$`x<3_F!L>E0CXK47e(M*+5e3dKe!RNX8g@wH5ce0et1aWRivfjB{MYs=~t@ERQ z6a?fs0BD21$#=(w(q(3|FMm$2H~`IRv2sC4kU}T@D<*1KAW9abCnoX<2_d=}iF@|x zib%7wHa9ni@Nee%2cw;Lu8hF)6+~qacYC;+7;z&11E5VIdUXhN37HiYtE&TMK^u}M z%r0=--nbzHUPA3lV&4QWexm5_Df#WH+FH+2>E(Oab%DWxL|y+A^N^7Jj$4JB7wC?v z=Co86CxmRpA5Vu)C?EURTi>Op1iz17#2h?A4jDf@c_Vh?qz>Zb;{sxeaUA}8%MR?a@b|U zBGz`$DamBAAm?)A3%1i|Z{UT*GXzT9x-Y-d{`oamS*?Hw9_}j3))4G$A)zet%LMNA z7*jU}FBiP|?_w+$o04Lrr}wzeCW1|V?}>V3xFTUBFfyxF9p|4?7BObAkC|9m!d`=X z4a`?zF|%sRNKF;u<5OLk2E z+!5m=9zrKX-zm6IM^e96npdX%Gsvog5Pzh@6yK=1Lg(9LE<}IjLIk`8!uRDi`(AJ^S0i8m7<2i{NHtU#B*G~{^NLh zPg+-Yq+H-Z?34esBEx{#rde5(f*JuQ2S&`NPW{7M+#F7n*y6hUI*!A-&UEz47ntJsllspEC9=%-P2C0m69o3u~|nV%B#MV7G!2ODGJ^**YYcznWdk*0Mn(@`?QV=r!*ch7j3+Pey?@wF%l4sXIP$1iA1~Wf7^1-* zv!DL!FI0aquRr+x*$%(6d~`?~$m>tikn_+>)jZ-fDJpO}*XyB;)?efM_E-ujj>rpr^O+|J+eu&95#wHWFB<=gVh)`!c{5Ou*FJ;sM>BE#!1 ze@^zRT=fJk`q4r;V$OxW5Fj6u$-4Pke2l{v@pvOH&$;qkOdyH+58gwsoiQAHj`5Mn zJK}UZsS5yqYO+T{lsvf){VbfEkij=GIjNMT^5&_aLC9J7?m(*&rA~(-0|kx!!Jn({ znD-kx5?w|21ySGU>cC`a6<*u{%}kwX1OE7szFo>-9B1;B!#4B-E~2*ais#Q~KUeM} zfYyw;q^mKu2_Js0!o{g~C~cjHsE0=RbM$yxVA|>F>o0U6+zYdiaI`Lm>0Mv3-~;8r z1FiAWqN4D^*u*gCCsBp~SKikLGY13f$tO>rsu?v&Qk>z}R25~^I3>#x*#;r~paI&| zmN6JWo;`Vjs?LY`gTOIDW>&-4bX?A@z!&+y-oLzE+ui$mU~yOW&nv`$!kag#jEQii0{f>1Z||~qR20;tPjv-+ z6dp!J!TCE@8(-S`0m@>OB46L#0TqLthCO2snJ5VjRFv)KsPal;-`t}&KV=yrlZTBd z;uH}ptG@QShTHtOzjgV#Is`G8RnLg@DU+!C{EiF%J zJNQNNgdf)CxY}nC2Aw`}EbnDK%ptr_I*HTLPL#V)IXGXQm)lAkk7B$lw}zVX+a5EQ zhYt@YhRx+-3t+q-ne_MqLo(>^MY+l%F-VMBc`0NzN~yx|FoKN(Gy1aBH1`v?2D)<8 zEf5g&*~}s=>Wu;3ko*%eU|aYo+~9tlfI{al zO%1XoDFg7+W*TJE{`O%?@IC>#0bzY=s^H2$8>SN(T3mEMC6gVQL9MpQ+9Dd}hmB!~ z*94Y6{|PwJ_X_=k9{!|95uNeprZ#~%frEk%^uN3)w4D~E=3r^-$&lYpLC7V-)B*Sy z92`VC=s(0kZY*_*Y-8+VoGqvXw>Et!&ZMbFmSGm40LmaD(32^QY1mIDP!<6`Ksv`_L8G9PU^o z>Ms*gP?fHXf0goDnbs!U$70vzEI`=q-j2>_dEfuFXhIT#W%ui{0E^SO0-=hmeSx0x zZ(N4^kNl~04?EmeGuF4dY>#3ZUx8#(g&)CpZ2!Dl+g`l)_pNxA27l5iZoje~;C+<@ zHnaO++|zJ~mTF*{lB`U?k)cbP09`q53tZjNnTuEs&5zW{utxS69r?Zdk}2>s=l+-u z6%h+8hu{dr$gwrP%?TI#LcZOU^;EvNblcq0}Uwy?ppnY$sL1m@rlLa7Qhp?6DX z;}g}DS1F`_m;a6AMd!EQ-WQ;R;mWT?46od&k(8X|;&GUOH1;e8)m($@UYS^*iMhCK!l{lbZhCo_HO()&ejUEgPLZSm-rugc8|^5CnqS3~mX^Bx zk-vVU^NLt)Q8($fyE{u-+Q*KLR~`$irnORvBVcP%hbZqLXr{~VK4E?JD#nP_ga_T$ z1qB5=A5}MkxZyEhl~-HKvQuBkt4{Ya=KaBw-7$}(ytiumXTn-7CrH$!%ip?Sz^~8p z!dAz>3Bw^GbG)T%1|KI>6#q8SdSFUe|GT1?pDrY~hJ_nVyFSZbT&2)0Ju$F>xFjs8 z?SKPOwI|9uOx!100Hx5k2l|730lia2QpzXYoJ+?g)dTc}oG%`Hrya=sQmp$tNRN|~ z2y%@M_(*hf>^G@|N+9eE6TuwjI2k(D_-8&)=m3KTPgu)XVvMf-fA*g%uJ zXz>_zzcF`~?No2y;tJcAmc!i8zz#z2t3A;SI0EwX(h+_O?FWW2zRb|N0@Zt2^139G z+HwfSmWc=U9wDDd*!0>)7dCw@G23&83|{5#H*dsKx&efxk~d7=)y!hOO=N`M)%aL! 
zwvAcVLT-ZT)1sm}$o>JUcXI)15b(n~vawm{U1PtPUq!bkdZ6d$G6}&v#;)K4)7~Ix z-TLexeo%EnQJ>|B75~0)VMUry&eN`l;EsPWZW~VntB^{z%O`XoHwYByX_T0Q%W zs|}3{hQRKI3b6cxu5ksih!E&A2CBLxfiiPyb%5y+W4!YvLg$K6hm8eFR z7Q(%DzDYR+$}!7cGD0J~DE4rgEEzt{&i1Y2*Wsgslp?$MJn-KZdQTCeWIb%3E}k@t zwo>H+_HbWGq))Gt(9;I{58IIw=;-Jkt{TorrMAL>ZLeI|cB1N;(;IiJdT4RQX;SqOEhah#2=~#&B9+?N0vyLoCZRK;)LaynV@lQd z#8HKoFC_5p*&Y9R4I>l^Hib)ee;C^wJ{3fk@|Bd7&{%steztiQFYn6Q8l+CtEwBmh z_C+RyjR7wmCGK1b-zQI=-2X6drTh}pEV!S6E*iWBnpj@{AkOu!(a;@XmP`Cpudswd z1MtnU_q!vcemow>5Z!#R-&|DHAmQR$j-z$=sBj+>w+roqHlUediAzJ#RdZ<}8cBOf zwz>NN1I8`Ca@Zc4#6PuigvNhsqdnt`jK>Zdyu{2?XU5%oS^o=ZgQN?I=+N_rT$!5X zR-jFHby0_qpu~NzOt*viTV@`fu6z%7=n7?70UgFshl>?fqZU!Ls?o+T5G<@%Z`lz@ zQ}y`U;-BCHN31K_V4%al@fk{7+y$q(NlB*iCyvIl9!8x7-Bjy$z;-S24dXk%tJXhj zXvk=#L5{l9T>sJgPy~M^msc2Up{O(6?#5bbGUMIc6cM9t|CMmBruF&`f{i2-B=s}v zB+&KE11okr@=VSXvhm;VhK7ga9Hx)*N6}+-!5eQ&vvy?YDE~hDCg9;kyz; zfX8cpdj|9NV&OK?u-R0M0P{P$m2Y92DgfouZ0bVQ6zkF={x*xaF_9hC{O*sH-D?Q* z|7u|JDR~GTc!0brkY2l#L1duc_E-#I(2K3tH~eckGd3RN(onx~M3+?R+NrDyOHktVX{2TKU|v8dv{o5MoW{Vu8}MLCDyoadD++LO z(8w*db1?>%4!i>a4j9MYUJO=1hu9aS0O4J7vTrsZt=m68*P9w?*N9ykfBPx$DMQYKXUg#5 zw`Ux!C;2>hPmSMY4nG4+)-uG-jd^HRU~HH;j1ks$jC@N z+-k2cA8;ihG&V&)dot@R6EVL0?4qek^fAF}pwFYxb&5=ML%8%y9O*n$$M#c@wo{6u z_(hQbaeB`b?5cP}Q3j&5Xj`mUzd^3ClPjM2o9~#u6dhIFp8$r_n?KJmSl1|!Qx_U5 z{Z1@n2Z3P&rsQ;l{`Nryzdsx^Ja0ECt&`-zeTj$Z)mapUc(hPM#&ZwwnqBK2RkMJop~_ zd@-lUz{nfj6%zp@uJy`>f8jUxL6>i$B?UPWcF*QUick7g2hnwwlAfeh!3QzU9Y+Y9 z(^w!?mfD9lHe%d1sH-;r$rF7?J<)>@5d&pYpp%a^cR*kla7;)n+2*K-U?u!2xZt(g z9L2!L455+~%WwC(SwRlGAp&v46uOJ1Jj_ZHeH}eLIMbde4ct5)85Tyogyf3y^B3Lg z#_aXEX;DA`;#6TcMsam^n>G9lE}GtC#uia$Wr|TtNhV^OY5_7QqMYn>Lf7r3y1qN| zhiX!S2zW0@(#q~-UQUuBzGI}H9iIP}LXO0U2%3Tq(uk=X7X~wrDI8T`qXDD(OUsW3 zvs5^VX4W^&>U*bc)PB;xO{sbLI6%0snX^#wttYlj2pz>64fo$o@d^oXCzmH}<5)la zs=TTZzk`6z^wB;d6F5{Nv=tVQv6qwfQbHH&pc)8mg&vf`_s>fI1>`9z?tNYAdX(+C zm%^xl5Y51|{O{&0PYmyiYhNja;=e9Gnjt3#u?`i$tACH?O7MT2ba1k@d-yLEIB-&| zSX~)b5@Y2c=g5~V_PL|1uhsKFgIC#P_F@x0!tn6nvgjX@w(X)XE?KO^cI_@ppzY4P zSn7)fURIU`stp`=gYX@8SOJ*O`>cdn>A~p2ntPqGFawOnm&roNAo7FV{CBnocs0~y z*eC5EAY{a)X1G+L(EIUrsP|va@vT+)!co)eQ*@M2r8c|lAPCWiex3_t%EIRAOT`U_ zaq@2E(;`garZ4TsU*E4wc5Hf)$=i>5XiNLpkin7C<+dzmu zrbF^Zj^WEkT!bA2xGBPv`DudzOx5;XRLY`xkx>W5A*L)^d>D*QoZgoZx+D745NcLD z8GUreF@9Cb0%YO!{^XG3+*Lx1$w`skzZSRCH%FnPi;Rq%f+g==*S7vr-|SU& zP--rOiiPO?mX6avDi(GDU5V&pe*LmX|J^wQG9{jp#x`5Af2~)+=%B=&SXgs$s=#|| zax^+#-SMPxED2#|b{0#%J8$xi0jZuqHF6)lw$szDg*T?<)_blrdIQjazvwqEwQ>zr zhf8^VHhfQctS;<69H$oNE*Ol5YcllkUO;_w%s#`?dOD0mnDbDv>0WZ>z=Lnh*wF=k zOAb=y{`OpIyC-35A0d;ofJC6PF=2DR{LpzmI-ToNh-ZTlj)F8yo;RU0cO#s_S2eeB z?e`mpFRemnm0c&=_w((;ToaoY)8Zp$ZO0S#5bQT)+IUEU2XDqB0wW38kbsTB7##i7 zO6kzLjaqJWU#CR{QTN|rCBVm*`RFx9oQ{*)KaRCK@$GDxT5d`zG%PaB^b_oUmx~VQ z`E#d_kBY4FS+Lhb18`Xaea`SeIt09>ScH;p z;NyRdk>j!YhxPSxm>1TChn=~Cw;WD^%O)m>*%;d^6_FE6!o$=y@*u^5qa7e(%#zd! 
z@`r`v(>P?I;DKY&fhFkmZWNAtnaAv<34|I!munBi8?R~Rp!meCJ^d2%AbWwlfuG1( z9Rl$kv!Tc~s7h$)QF__Y{ z5$#rx@Rh*GJ~7|#CuX0FM`2f&s61Z9$!Q{t@?%PEaLwMBSU_rSJ_7W%9WfB zD{0qXnF{P*9mUkvb`uarf{4jOeoZ?HxU}Zs8Gh~kZ(xd6+qtH$x;j}~3!M+PVz&m% zPUq@y$mhWg9VJiMyFoeJU^`C|pVr0FC<82EUaJ&ecV3$xrYdxFs2NTWHZd$*K36^sTP#MFxK#OqYWOHfeXZ`Y zW7HD&4L5DbFEED{wVz{)X!qr%!@@`%G=Zkt%qQ&pMVh2TtTHrdNL#6kVYvqmko^H@ z@S@Ks(%$}B@EUmEXHl-7O|Pz7>@u_ZERbycM)9$LmVp9VENxz2GMmByIfy(K6gi*e5ZeZSCz7UK^fCp!c5IEyXUJt=?+4}pVpC&AFqKI zc+30W&wE8hJ+fqPy!P;hqjVaP5a;|i=!F@6&Pp~co?#}y`^Zck2%=P} z7&(ER%kPAQgdgP}#(;ASp_7lT8z-0!v#`J;g&lY0%d4+eR#t#zb*GfM9iPwP?|)@% zK%|Z}_-s3~$UZYN0x5}4l+S*($F6wn$`au^{}1q}C7t%}q}$R2F%e4t^=lX{b#^3K z8;I?Ti7(7c99;kkAxy`D2E+SZno*~63ggkUMxRS13VWJU`7XYnbB4j;6tJ<-h*6=MF5H?mI5l&ACl(eAojnQF!*;RmFB--3knTP1D$3~ZWvE8d_xOBD<7u)1KAxz% zW5CK113aJ7(xdI2u2X#Xh)CZ-(l*=}mamiBQZl@^-TuC<)<`qvJ>ocdJi!>>j3m=} zJ*80dfxD>(koBgb3TJHY1SYKD^{JEmzzeS{aJp?BQ)~*> z9Xg>IJSlt65JM(+@+|S!znP!E=BF6=(a%%vEbkSmp~7p6=#yoCf4Z_F0d&F&_9URl zO8QTbW^3`qs+}mn)&nX=g+*N&sj)c|nc*=iX zZX=2S<6Iq;^_}dJ5)z7o5`(@az%7m0XXZgC9G16;4*;PtzU-OZTt{d)nvOePDF&lX zIU4?z@0V0UEIKh2vx*v!EH5*r&O$BMzy}lq*bTKi-##JOluEO~Uk)}nK>R+h0lcNgLSG$!}(enxg)3eh8%JumpALBmoh`f48Buto^>>W%k%&9vwQnn81{TAQwhf$PF^HPKVf5mX z#yL8Vom2#0!j9~<;Z{qk7i;wr3;##ccR*v^_y1oZBt^5+an0%FN0vD_c@X z%C4*sqGV@pLWNLCb_gLnNJd%N>;Jm$-}!g$^PJ~CcZuuzUZ2nV{hI6-rPx|q1sMVk zoot?{Wq)UFZDq|Uy(m4QS6xFL^ZL!3nE6X-bSDXBC||OdAZE)i+k(<;wD4s~W0nm- z|LCXpodD17rOj6{=<@{%mtEY%# z&!!*f>MG4tMfoZ*<+QVw!oTb~W%6xKHw)}kzokA36VoIiZBEaxvX|(`mJ-t-=e6oQ zHRA-AyqOt6RuacT-*gdw7+XnrwqM<%iDG)rh! z;P=^;L}Ulba#NDT$Z6d>0(ufOlq{W(Ux|;79#(n%uDTJ)>bH}95TdE&#hJ_J(koK_ zbyQbREh;*2R)QrIlmG$V>MTYNN!Gy8K5eLN6a&F@!`^x&<9qERBzC}27GgpmV0nk1 zu9y5D|4JzOcW~(7Q0$6DQgg%TF2cZDH&{nH{ZBK`uX%l`inBeyX}b1RFVn&KB(v3K^H1ZJz>f%>mNhQOqQ%PT)1uFG_KSOkG2jcc2^cvj4ZT+?VD<2be-NPYRYqFSM0TKmqEpBud z?-VrbP`xxe2*<7*{sYyTpx~9uAyyCO`S}2tL1wPGcM%<8$oylrw8Yt*os+}NcnV*) zUN~KA52Jcv()Ox)xU2Af0>EeLdb6esYr{m3o3mTk9MiCVK`Zf9)84exdC}_jueQ*y6A*s{DLPVCZ*=KNCp0+W~>(2Lt7uU3} zy>sVfO|`ly%yb(mO+g0<=SUVJF0wVgUxhus-5V|v!}8GXfCPpDe{pll2_x5<#H}hE znM>^X4G#b!SKkV>JAOdA=NG6TpLfU-6Bh0iqyOrHG^PghKPOgZu%*x%P z=zf1{{jU0h*bYC=VJaP8`blk3?r3;AOZwg`C_efX$W|7hb;FgfMvoO{;bN8-P z!tbK~Kh-FjT;33T;X2Ve7<4yTek(;^WNyw|bKZTxLS(SK94~p{+1Xd@GkqIE_hL(D z;mAP~sZ|r?k1txRUbF-%ecSSL9C|~TqnWLiel=nPK|QV^3HRymd61f~R#?BG@M89w zwjTQWYoHj?Z|r?nWz2Xr$QKTN$Tq!icm0AgDfwP(g-f3I!A)idV>7;(s%iVCcW!PD zceeS%^I1CZU;|iGA@@^CjhHHbq?Ecp=tjrp zHWcDe`=@7@MR3IlT9F?MP?KVD>mZE{CwfT!64M$1&8%!_n$3O=Jxp|lu#+{ofPRzP zYwt>P>=72jJMHOJLZcVRuF=;u6zVU=&;GF9UKGtrTrc|7AY1ci@i#sw{^mWe!UpYWzg`6U&C>ERoH3fGp;$uX zPw7krvic8_`AjhjQ|~1sfzilcJydBFkj~US+|oK1JHa`ZbZzVN0zyWaLrfVd_Qt{Q zNI=Cq`>TprGFA{&nPl*7aL}Q=AI>KSg#T3R3LBX}@0In2mOv1l8Q6h_5iOds^LdyfpAz^j~ew3dPTz#<)Zcb!zeEuUfgPsU7_5G=}7m0_M_R zYKXsf08Zbeg?A8kb?qxPbilwNkb9Zd|4IhP2Mz7RtxB5YHxa*jH3(@tbq|9&C!uO8 zwXK1sY#{Z*HHn+k{C4|%&S~iClDi(s>ZS~N%7uLcu6($>VyB+kI65LEf(X3YYN?af z7owCK1@Da>@LYoEDCCucO)g4B$4)4!5l%Fs0M$Zd4Xy=#PlyFCgqCUVGe>NIJS|>! zWTc%>hXjf;{{X~>S2Auif2Nl5b2_`iFiKxO8?>Q;(jSq=-(mRdi=3e4FF22qUoesq zkFA(YbU8X_vuu8=EhV<*9nGXMB~+_|60eP=-_zR6uV+Q~>m< zBBJf;71V4=mt}e}8=hbbr)XUx5b$mqM&Ij+*!7ulZen7h;|oTGcgXcR)~7Y~7^xYY z4)KtSXR|1Yu`F$D!L+7ddV=&=J1ut$eyb9HD#Yp? 
zGTsK?AFMCDaEu#gr<#CjD>;Dw{6%3ktM3C(ghi3SfKGWE@}MtmZ9&2B!I z9XxTdW+NzH2@YH3TgO>Kp&Fm+Pe1^dd z+5D|Bc4cRV9Tgw-R^2=8HN3I`f@sPTJHCg^&lEN*>@0h31b9p3o7R{h)GWad zB@`9KspJPt3Qi}uibN=D7@y9178@(tN`V#LZnpv3IW!NVC#vG-kvkeb?-v>hD!hk% z6I%a0$e3M=Vq2*BBKKqA;^Q%6DPOd6czUvYW<=l`PCrDg-V8$5&&J9uc3begTq-Qo zf$!hTSd=4vJdi$5DHWcwsJQiJbp9%hluu1f+2?YU87-hEoI7d{0m9u_9sPlyKU)oJ zH$EF09B>?}K5|?I=o?aFfda#?7R<;U)tLTMTsiV(e7vD?-#LaDY9hfX5;tz#J$?(U zNwm!Zj&xeU6$A+yEugb{GYT3}nM#ahF4lZ4RjMf&j_uJbO{{ce7WvY>5#iKtl+rs4+Os=P|p9zHfw(U$7e|K9P z#6kd@ZOfz>f<0tj-6mf}j{`_CvEXZ_rZjeJ*m*MpDu+|GWLWEu>}p{2UJpP^hOJUa zi3b45miI~A(Kz_jgl%k`~pZDSkwR8IP^X`mG9M&C|a+SoKlhG8XhVQr%wd${&J zLYeEZVlS$l^Zg$go~M+Zl9%s)U0CR>QaQ=b-=ruQYrUqX%^Tgta}I$f3*W?2I(Tep z6Oj<*XTA=r;`;wwsj}*vN2%q?zke2$r~El6yG&M1gmGAn*E|gPYIw>&gsOz5u&)TK~zC`m9Tr9#&Gk9HRX8irS%nq++4$`^#L(3OHvOfrFL6Or80Z73h zAj(Ycik&Pe<1^+%qCmULWkQuwL9Q2)l8@mWQfnDRFs|9bm6+Oi34eSbAwb|~03HPp z`;a>_D3r~dRnF->?zRosLxMa6q(A6?I@sEbXF?;yC*Zg!hN$4T^Sh4!5j~7k5iBLvHXG^S^w@bI4~hIz1azH*dR~0 zv<2A@+5w%y)U;#<`0)2ki#`zLkqc`Y|Mors+hCkqYK4&_`N}PxLn-T4;jCxs-V9gE z<=*@cvxi;PGmG)ip>`!G2~ze4;o|^_Kj9(jxNgZaQ<61XxqhE`M~O!(WZy>veXV4w zF)P%md1qZj4xH2uY9SqLZAl2LUe(lrwCQ8#gsD><-{YM7XNw?~u$HZjl}_))|NPXK z3#+INmytJziDB>+B0xG?&6ytl5{SDb!f-&VO(2118AC?#<8@PH69A|wmB;jCY@Fb6 zS3jWm_q;*OR~8X}sf4hp_Mp~%h-_ZWClCO*<;jzEyTtF8P27P!YZn2zUrdTDcits1 z!~u@a>3w|E_&fVvHC`US1JJLITqu%3-Ue*MBR5hzl#Mgx<%mt0qI%Le`ahH%o9RpG zoMzI~0~l?Xi?ycclxOitdlD}+YEQZ4X=P!`fvoBLQEa#-fu(*?T!`JrGE8d+u<~aa^XP~8r{FsECqa!v1}88d zI`xaFB1=fXqm;SR422{C^}D+W0F`9e+VFgum30U1J1o*AV8-;lt4mB={KDe26`^4s zIz2%*`$e5>D*g0t-<`2sD^$R%>5oq1Z3^6VE>?y*#Ae9x}G;So= zV82|m_6u!8s278|wt#&%q4W)dj;SYe1qlPj&bb}2xmJ1YLVwOF|R+_9C`!im$N-1;hEt=al#TEe${V-KhI zE;5RH>~RTc$q#e=WenDrFTp1rA!U>uR?|iUy8-j{)}q z>0D5~CcO2@rgUZNvI+Nsrk2SFW=t`LlovjMKn3Fp^92g1kuF%fv+3 zJHK>RbZtlmnll@+!4CFZVo$lxZe_6?>UXlq8FT+M)+!O3WCu#M7RLkt*eRK^q$Cb; z&cAu(vNuXPUaZ|;?{uOftD_FS0lxzm-KXoP!bTJAwgyTB+EUV|9t^F{3RNv1k1v$` zIK2d-?zSo9*+gZyiJQmo+i%@vS{ZZ@1F9P1^Ko>(dBg_WGOk7C72Bf`fc|M>{#C<| zAkG&QFuv7Gk>bJ!IG=?e`l?2c ze4>gk(K5;==k293i#k}X2j9u$U&=k$M(b0^JG}Zc?Si2?7j2NOj2hF_n**gc4~921 znXtd43LEjv@HRU8H*T)|^WE=%n`xbzpy-9?g*yuR#(H5~K%oQoWZ3YriE8{W)f~Zi zJ^9A{)%TG)AAAmfMiX8StHIcH(w0ANFb29l78NOtn4^1 z#e^8+=~1S7EiSfjREr`MI^jxrTI=Pk!d-Rz2Cjimm85!VdZvv>)P)Z_vkn#AXsGgL zj0p-h6LudOi-zz50>Vq3S`hZ&vVzxwOGUvh!f^$$Z{4=$-mgDh*UZxCk_Px++Gk%H z*bB7X#?I~(o!pQT~fJ{zS2bkEi?9?=2QOV}GI2u4+& zWe_mFed`JOaE|n~l)V}6$^YsneFf;U1cu$Dj6FOmuVfH`APu^XA3ty<4ck_vSML7z zP<^W*LrZ<3re<~M!TYUnW;t1vNQGo!<5|UwPGtRv=4#`v(ked`Bp6Xe2bVn|SJh6wuFcv`VnKOF?o?roF3Dr>} z5c_2CAAUxl<4c1F=r_{;ekDKUii=kTD{n4Ey>wr@ME%}Qe$R}=s3hdNxZgu=84SdX z?o7DzFeto$Z~|=r)@Pj1Wc)kD5u^0UlQ3^HH*Z3>=U?X%2`m|>)6te#0J|tRS|SPn z@w@0mwzd!`)1|Fm+wdju?BMdhdvz;E=ifakBkA}MYqAk3G((CB4b6(GEqXAF2#ewA zZ*#wn;RKVMwbPNDFPLtK%2|PvphC$JIQy{9KIb~6fxZo^tk){r5f5)~lRVw}-K;us zJvL&piQ(C!Sl&^tEana?Ze9+kvTUxiZjB@k^eT}&cGUHSz>TzzA4C2M6xKtt8Chl- zR*5m0YN}l*gF$Q`Fz1+J2EqqlhzHL-eb5^_)r?~Z&9@n!_0a?|Rv`6Zc{$2IF0lY#$FyreGdX5h_i0mtaJ!ZmKB>igVdXI&7=sM*O zv-AFgDX(F)@%1%^(Tvt}rlzJ>uhPGWj^Ah3LmSBzclkT@u|CGO;zAiojYR>3>LBA1 zjDm!H?>TXQme9acJIoF9&1yhg8_@zmt=7}rHhwEKEbQo(I!oz>_>{rr%P~F&#Oq(~ zB4D{k#7(ylFWqA;0p=;+^B3Shd)CalNE~(ed=Yb|XzL3R z_l!J#&IaCFtvevP(5_j;@JvcQ&6qfC3*7n=~%SzB-7s5;Q*s&DWeWdD92umMU#(5 z@+e&}fq+pOjv1e9M%<(*MNd8A!|@2C+Kk+O`v!6Xz)4|8tiY?*58iC8&Ld*TH=aes zArJA*gze}U-oPM-iM9usc|W?l%YM&evoAILBR%_G6o2g%^9m^030PZ7au)tS-+PoKC&RkvcAIJW@BMw^`6_wcFP=Y#9V_L| z8=nKZzq12Q)q&P<{{_e)Gn41Vn@9XPp8$<4xXZ*J!(ef0!^94#5KNya(aj+G>h7ce zM)QmSP&W-V^4N|q+-jTNa{W;Omg9IU;9@~vAw5PYBFQ)NMSu4xoIesTA|@{goGfl| 
zEEio)SZ$|`BwNiSUjZ0Bx&LUi>diCxx;aZLH9J)uHe-uxpplGyZopgk)`sWXt{~2u z$Ve>$;j#+*|4^r3R2lhqpq*JHtQ<+sd=)b?3|a;*HpuT9=mz62-Y_r%`bDjK4QF&-V~DIjG_lRKhW+OxhAegr%kT zN}uXTVo_jiK+dElnc(4t#l>>_KKRrC{%G)^BUGmQf=Z;tuUBsg1h#NI)F%NXx1jVL z9-av-4-FOjuvu@ki@VI5S6i*cf3SR@h=-^vv4}I3 zogB4A(!%4qy1K3>VaR2BX@au`gG z!Y5D++NT5g6v7+odoOjNsq4>jv(vZGNpXr=%~%sGi_*R}SL?Q2grgnMqWdv+p!E$c+$(6J?}dQ_ zuuC!xbx^%ZD0kWvIdK2yk=h{90q>2>ff`4Musz3a*(V3;J|QtlrQ8+?-0D{SNZ^5` zQS*jQBv29{|-C7NB*V4)M~yD^ z1(6Yu!grhA8()D}N2y6MD>swuu3D~qTML2%!SjypBK0};48LDc_DEA*O@l z+%i`*!=cl(iT!-D5XO7Nb)h>Cx|w51ECNxDX`PeEI)tkmzcOcU!Pv8!1iZJR0_6;N z$qbVh8=4H8bgS_5g4i@?`uY+AaOVHn%AtbyEL$5fId1hI(OZ!_)u>% zsRh7*`RRJ{okh?2PmDV=ZQJ~V@5uaWuQJ*NvB9$FjJFo9_;yEju%kH0kM_kOK#>P0(jH-ry1FWVaDKQPPvP)gSL&;W(M!fj_c|$_5m=~`03PI z6v$EInS!S(m{BMt0vZbH$*Wu{&@gH-Uw%%mH7Dp^iK>$Evn{|=? zu!L?7eS@=L_};`ispAmNhY`-^^N)1YIJu4v$=VaDAn@Vs@~ zh+*J_^6lpyq%sj>R0iTX9wF%Cw*)|qz2tttt6(q9YNRSWFnYp7=2mw>^}Xwq5D`Jl z0)fZo13;Ix+0rb;P~n=zgju8a?h!?o;Xp?nd5FSg1RIl1*cs4K1;9X8CF~z!BNh)` zJGSExjlI0Z4HJuK+f2J+g6B#ezgID}15OKkxB52E9iJdZ{W7<G*%(Sq(*$vJ%P$_yRgd&4i#xPj(bot8G;a2w^rD%AFQ~rgzq&yA z$TY*eZM=X{f*eH(nT>s1DhQQA#mn42r)=@vYRX@}Z7c0Tnu zB}un{MLl}ru7(_1nMpZQ6mufkZSv=VpSF>WX-(q_Z2T`;UhD8`3-FYom=6j$IEPfD zI8}-Z3TB6E{lwIt_y3Va?<0VHGz)-}OZ*lF@y&xf>(-17LRu6h2CrWGUG4w!>YgPf zo@l@==vTc|?CaM>;1_=%qz^GiU7c_J2wt9*yhIBz7MKS54lDeK;af{DF67^nZM&(e z|MQb2G9e_csi`&Lc49{&^M&%DQ=i5KR0U0|siR8e0Iq=-R z=Tk5DsHi9(JQ1Vg9#-LGF7pTnvcMlPh8T5Wm=lFZP=Ke%Ea6&d1fqj)ro#6b85!($5Kzy>(fP5S>d&;DC?v`{O9JbV)_$MeH@d{0nhDz!>DzdU#oe^A z(O0o0k*AFnGW8ydxEOQaKPaN?TOi(NssQW^*hO|+5-ImK-9M_hc#Jq0|9sfN%y3GZlA`46P}lIDfu*eFC>`V`GDV4XOOCnm+gOahuj@2_|Zs&>)&8Glq^o z`jj^dm#EI5?-diHb{KBE zKECW$K(n~XGC#jvoQ~;rfUAL1+`b&}PZ$`)!R6nKDljhRo9@$2PL~g7B**<*^TKKg zf*CX5=@aGWT=berNSTs!hM>6$Rf>A?0#z)648fY4)ViynN)6|t8%7cDUY?VKsnJ4M zpwVIh>V4=#9tKMwyso_eINc;pCeah?S>01`VdIY9-rg6`13Z75p#z{L#jl9~TfOQ{KOi&|7S|H}5;MprH9Yyq+AC zbyg*V)%On*AQu@a9YwP@;UN%XklDY?Y}H?&t7F z9Lr1A2m4Ci5O>apH;HZUNWYqt@VhVk;iBCCy7s~USghxRXUtS@#p*9(Af- zLw{M&Pfe6;j?SCa$q56+aX8>z~({Ze-%rA92>v(XlW+gQW<% zki6c+t~H^ zy1Wt`shU2Sp8$Lq!IY3LHdACgY1LL)ae+EGHePAJUj*w*Qi8^2011Kcv(k=~Q@wZb z58G!G)Rv`{mC@&?YaDR>za+J*f)v@$>yBZ{geNzZgl2!x=q(QbRX(H?*tO5FBKjZ< z;MHXltPSu1+9&&cU2ITlAP^33C%$87xR@ROe*T2hhkf8fakWEfhv=IdQhevrwTh0Y z$A0`F;t%>F&Hrs6TUrS7PZI8mZj@JV_QSXeHVH!%^G^gbSs5zuXKp9|an+qpUy z8RHODaAbVFJw18|>#4mG#RU@C=5YJlPvm|ebO~jVv0`nI3u;)b#eKJ)@xCXcLK9|q z|9g+LX`*ZEsqa%Rl>@+iI>^_&yf0r4h5ts@B0e2L3m9gEOx#Cy6VCA*f`1kXfsGb; z%9_R(a3GiTCCzb3sU)`beEW6_ejBu&o0vey#7GtJCfZ@+Uds?gsyr9nmetUUK0kE@3o*=Yw~ux_U%(*v6O1O}ofruZl z!2(x90>%$2A#(iH*?TdX@u)}@xhtl_a|nc2W)Dne%H3Hv9UZMoeKDq6JwZ9o=O=Fw zw$XK70*GFRmeKUqn(8^0mk00I{WUmdYARr&-(9$TD~`pLeD=bY^14MC><$piUQpM3X|D;` zftpz>ho2U3>xi>EUPLl^+yh1W1Kr(M4Gj8*hQj}KS4c9V1VDs|n1m!OaUT>3Vn^)a zqoUAJ#~-a4krTDtalMG9CkzL{pWQiK|Cd&`G-hXK=T|!n3LRJ@Ceb{#0kA_kFC5{13{=VbLB|zR2{06OUZEcN>^4@A6N{=GR zQ@HrKyzM;xQ~-tQ-}Y|Fz?>x z>iqem#9EU?#L^`DKSP!mV4U{}50>=$cNT=ow;B^mWtVg&5vFN$^;6=jJ4?%9$%=#T zJ)&$s;7${joF#``Bu$0 zo+)z$FB?q%XOC|lK40lFssyc-?R#P4L+mVd`bPs}!b1#uiL0OU!{ck1U_zq3+T(7I zeTl%x=r;ZRh^&#@>Kr@orJwGpW3Gw!HcWTi{#Rb(R(l|l3!xrOTq<7k!O_xpy0jPv z3J;zY|3dMO_uCJmqa?BI;?}Q{6@e1r;@A5|RB`iwB(WbPX*v;!`yV)=rODofwM}}7 zi>P!gHS~^Z99!AQ%Bjc8p>)g>n)mu3n#HwEe8Q*=o)In6n0u~D1vQN9BK-O_!EpAp zE$JuXzSbam+Xob0NM$kq>-~=ZUkh;f^sm$6C>gYktTn34gf5C&t9@M<>h**9Ww8w4x{XzH`+U7sXdqTQgDlvHW&@w zTh6T3O}z%PSzSFnqmqv0{n&A}Ee$xDS_|3P*-xlMYMx($&meT!H58&wf_A zUgVDAbnP7+M7Tw`xjv>&B<`270(+QxO-B}ym}qKRz|7*g$8iT;%}369K5LtJXEgHi zCA62>^&TC9lQ#X&B+PK=AAMWJw$aT;Yxf+8T_M7QfwFFFG#Br+ zA&N?D?|rVTD}FL8*{UDH7uARvpTdFbYkeAcmr5J;j~A+}F@9wFupnwEu;VgS9*4{y 
zIK*#D$-xJ5I9THAsz|hc!1im0&Ws@S&Shv>y4)Zl)8siM!bHswd?8Oa!%#kw3wK(w zks9QDqnLGYk1@+?a?$zNAzS=$E=STO?JisKRQgMnxJcV;%5Vcv?@q9b5kDVW^Wp+m)%;CX!9+(Vi;60YcBWHU6R8c%tZd!LS= zVPq5{(YxK&k6Lqt6um+RCo-QSxsunH@qD<68{ugPZ|D^FV#0YtNs@mui%J*1*8VpV zbDDE>p?D&glA=}9dDF8~Ye+Y!s>FO7MvOP6?XK~@ArJubZF%oR9J?5^;7~O1DPGi{ zc_8`!YOA_l3E{H^17B{pg}H$Q9!kxNtTWtix>PH=bdFf;#ln0kk(q@>%6a%9bRWuG zkI!R6PW=*BYldV8Awj{$#f%c&@4{LuwA-SBAm`J?hD zMbXiu%ldfXhPAada7;sn4KI)w=V{NQWf1i?l)k^Gr~4~ittD9@aPU2oI3%^4x8yMm z@l>Gs7Zbr%2JhS37nNcUu+$-l=(!4-ZVkG#$BTYZCpV41y}u!mnv!Ig_j>IP8A)x5 z=6wdp-@jNCU&x3oP~!Y(=rr z(=SUg0@tumXlh(8=)tQR$pw>%&d~sq7HUtcc2%s0@7}z*5l=QHOtkXgS)_|I! zf^JY7kfiiXgX2L$8mj6gTSw<%H%2a)hH`OLN>x?LpuGg+e~rLSeZ?qybeUcaZ{4BH z`dF(@RbrhL==@(kY8sVCNm}u*>;6c^u-y%~3K)Di5bZzZ`*Un!w@iRGgtSzMjxKpJ zFJTM=SE~&&om-WCzpSqb4wyeQHgVs2(k0c1tc)rB)q^x0~`gHEf8XrWcWf%Nm%AXl?Wez*NpU#+}nTR@w-x$-6Xa6_bg)JVuT=1lfsRC%nCe)@`nMd zf1%CCH|)fGr)$KykohdAW0%aa)T5S#iGq4jUHutmNs1B;+*#_QL_aLFN$ojUON{L?ybY~bEoQq8_d0KzmAvM_|2 zY+Zk*KtzsZpU>{I*Ui!aN9^hOHY_Ja0!w5p>a^N{W$V@CRb~fni?NL4I>^ z_Ri*N1Ib+S$7X1&xKt{QoG1V5FOF;+LqtfU1?aK9RWM&L*FE+i5L_2sDazTQ%fR%i zknD^r@Mr|ji4zo0c3oCihmu=(s<-dYG8|AB7TwwPb9LFs96|5Y-4*kA*k5O0xbI&DN=Nx^I`!>Cs~t$JN+p(DNzS zGpeybHyH6?hIx>63Y+wHcEXu8QY`)pifyW3s*y1cAt4yQ{|6laksfip`kejzvA9dO zBwvEmVQpadzgoKhEIUV#vM`X^d>Qr@xIb<$2?{y1vk6y&3 zsELcN1T~A(18lf;D>8=svE-c@{kKP$CkjBFwoUYObqT*75k{gsv?hV(+~t!E*51uv z+<4O-q_AHi#A&?Auu#vJU<_Myf8W6Vfb|dH)lb*w3lJ-U{%-6A*g(qrncaTi47%8K z7(LB|G2+?0>+NXjxBZ~UCE^h98GF#HYvwu}-@)D~c-&Fntk+!bG=xC5*6se)kM%4H zRyO9&3^73w&_kkiy~TK;8tg9Y0l0^+RvxAI<**t5qZ9LE7tjOb?ObOH{OQXL`qFRMYQQKXhoLU*UvaTWZ_gflSv)EV z$w^lzhZmg8&38TIB|Pk$L?W};2|zHp`MKnTmoFq4J*4SEl@RB!F0%L8Zd2*GvfD#` z3ds+rhwNc z*;xn1COTgl0}oJna%F#zpOpdFag9T?x4?g6L6d)X97}&shiw^>gFCA+(xq$gcrP@o zd;gmW$E60*1*905zR961S$}2yVB6l~f`=FjbT2x`@C2@4G8>H`8VdC7fusS=6x&yc z;*e2EF||TFisHXKPDa?njvX^zfQu&df}MET=$m<$e#j}90uPWOR_W-xvLxH~)T;uj zieUHs>`LK!QY#;_3%~Ch396mYpTW=C5YA3R?#H+1M@eDsNwlM4af$gG60*%Z6su)dp z__~<;wrfSZ8+4c?S_M70GJfAG)#qckyph5tRbf=zD&`RD!LKix6yLZ=oFXiOgbA7oh>%1g2A#?ZevRwgMV?Z*h z?4^3b_S{2^=mlbZ_O+jSH5v#l;C_9$fP#vYN81V94Z`+wteb_Hv{erbgR zmopO7vr{*NHGhmfB-@;l{mBjD+j9W$r)lUj#&u ziO2YiUUlT(?$mC9^eh>rJNIH;Qx=AGt^lI||$j?@<$e9DxMj&-U?|J%?SS zIy#Rgw@XW_MUI7{+hWogkU+MY2I>%(7n%U<#;BqXs@ZB0U;@hC5&8#76;`+rHAX?B z-V$pXBj-bmjMz{S^<&o~h^toO1#*;ysnH-9pw@-}RGZybP(q_keHggWh45>u^6Gad@!!tEFbEKjPd&Ot%l?=1H1tZj}=+_V*uJF^oND^&ARnOkcVE?$`kWeZyEsvR*gv9z26 zW6qKWo|G`aRovB+TLK|-K-e(qgbofmaijregfjQLBnd&%W&e`SuJjag-7Pr{%YZo1G~K~tPxKE4YCMZ7`McNj z3xA`ih=`V{Y51u7=wjRA`J*B&1=^8`rf%h=7Bi4;4OdQlb7-qua2|OTFSXNMU{!Ng z+!vagaCW)BqD*yHPP{#en=EX!-Tzs7socRQ=Ur+Z%uUE;gwoV3#|LLw-~ae0e`jDq zb$MofV+&JKrQ&W?%Rd>i0VB}%p?V7|+(!R{dB(q4C%umzSj*HlYaf}DW*u-hIv%ZX zdJ@!{r3mvarv22^2oV|^9|t=>Ha3RVh*B*wLQ(+27T;S|?B-ec4%QYcFtIWdGnH9y z=DFZ&8XD)KdX;+ZCCdAe<*%#-{rPQ}>GU@rmR>W@I%>bN`s;<0EEKH($CX~`>vyoD zQnHq|qW{0bby(X`DRCs8)~=4$iqtTC@3XH|nZ!znk zOdVmN+qZ5NLnnrO&7%iOO<)FuG7jIpqmxtO!lhk=Op!F`g&O7&SqJSJ=xX5f3VSr% zMG7?r z$Dt1kgNfcXigo$ORll7xWoKZwGBuuT4GJrBq(mxXpS>dzAQY-boZ<*Yf6kG606*H8Q??xN2L>8S`%LiQVQJ2LsC(@=KdMHrpO z30E)g-mU1s_&jZ$7z4lXl@EiA98y!!M|0>6iV)EV1AT6hA-q|1ORd=I$>c%9nlT}o z2HIbFT7(?fOAtG+cn-mz!`g80aT-#S0npt3{q)my7fDtu@>q@8!c&E(GI&%TfG&Z& z2lZV(md_*uNu6UkFIhS}7h-)c8y>r10eOITwr0MYB2R>}%`ScX6?Bh*gJhaSTpE@n z49EWYvpSeApjn`2q8q~^dHeFE5JZ&ca#~b6_0A4>EY6K|Eg2o9+r{)|B1mqGUo+E^ z8Igmp4A|-F{U_;%p_@SFXP`Ok!gwWuXxPaKNg`_?N8&D#C%lwOKzYjV{NFbm9miv< zao-Y6RM$#%<+k0QYIV?@*%%j^$k8b@t|vyL6CfUGZfi^YFVwHl;R3#{y_Tk?$u5HX z$P(j6uT%20u(9SXLUEMbKc{zZ=PNoWE%`3(M+b9+@)$6Xc3L_ztOg^zC247CNJ3vO z`~Bw+JtY~&E!;*3E5iL+uYj|%od4$iD+FbjGV469 
zI~NdcnU~?uS2zFICA<#t2R0<9Y4}^7-M&zy@1>r{VYYAN>jAs6chxIZGvx?2-^Wut zpl)`k@Xv!ynx*>hYiWqBtdwBmUI3Cn7C1Wog9jSj58z znF?mAu2*2-goO*8hGc>N33L+6$1k$qKeQZ+3;5V2jQ)>4v8JAyD=RQo-+GAN(V6*C z>K19URcuaeubzHct)56@GYsUHg%A|V3w^*i&MuJGaH&{!DK}2-g&N>MU zHTW6hC4JZ-v1oR?jxMgfefu^^OmxQZM1(SMN#?tz`ueDXz~GJKr|U&#mjc^mg*kL{ zTxr-}yUi}PwS{j@>A@OdaMkTsa~xiTdTBHomh}H2Nk-zrFFe!Z4a1pHvy?&Dr zjeravUt9yg2W8jK`JM8+zg#XS^zA%wWFYA~c%9P6|FQ+WX4jK7bcA7OGaiBN`|}c_hptIpBU)j^C5A*bN4i> zWP4}(Lsrh~bI;>nTnm2kG3vy{f1S4CMjVgg&O{wap@7vbR6S?i3VG(NNdwaOLT0ZT z#&7u@KJ@RTHVNU&I}-+^|H9&Iq?THiaCW0xNNqM$HU0LF()4TPLj*ozMB-qdi}sp<2Sv8&jQ`|Kf0c%zz*)H!$@QTtc} z*y5Uw4x$pkXi?>7Y!>SOV-}^eYF=&AvQ|_iP56l@;!2tAbQWAdkW6LiK$;+S`ZO+Q z7(`HwV_F>2Mhwi3R;6cVo~-euYW(_@D(o1!O>DU8=lo;-oYTYC%zKCKPU)z~2|Rdb zYs<2q)p_~fDsQ4!g+PpK{nOhF!D~(H1|OgMaqS|M{JbYLqLgQ#Sod$T(SK>peMLmd}ba<9p?4YRbwO>Zi*1`YR*qKLD`G)WQ zl_4QRgbW!HCCN-Ohcc^F=BbiIhRPHnV;L$#LXyl$5<+Drm1GW?lZ+WcB*VG(_qWbj z>-=%nI)62++S}gm`##V8-1l{Tt{pq93)FEpBB>W01`iJstQqTn06}ag{Ph`@msqI? z-DLUVii(OSPY8Y4VeOxw@Z|Jav2>&;cu6QJJ#HRb_FB?(>-9!a3+@72`lOgZgvBxH zT`D){z8t+91~rOVncG~bp?wjXG{@H_WhO?C{a?R+^Ufsj8BbF&PGP2K4PU>wKTItZ-V|6bUBB1eMvSh^6R_Wq?iK(zb-j*LkNM;o{`-Bu z-_2#BOYB`t&}nIbO?F_6XsD}WnuGiuM9q|1(h!i@3=azsynHr3KJSdv zT7mg2FczT+{@zHvx8(26|E&dp+2@gxul#fAAn_W@-a|Fw;6~|Z;Z@Tr1u#y~VpRWj z{7$PiyL)5xPfm%al?q3xV?Ms4<=Ad({PSw__M0AGhilJPuGo55C5+xXc5_YK^Z-by1#f@U@!UmA|i`$ zazkSaOrZQOBmhAn=IdMDhwk23?+lPEa6JWBWG?Bd_81B2}LLJ*dQ;L5Z6rn9hC+nJmtNZKJ5`Zt5 zVtr|(RE{&fxnd$E%0Yx5UkozUyC6YI%vd|sOisM$uplFXf`eh%1R0JsytbwW3D?df z&^{8cut+BBb2R`&2+l@`)&Wt+S=3zPq07F-_ng=J4^+-y&I znu+ri^roWmy{U#pkL6r{#u(V1I@SB>Q!EuZq0(bWN$@H>#>>I+dM0<`nm0UNEAu1u zwkTJx%O}G<^YQ$bBv}T65m1KqiFcM@rCp2{F|jQ)iQ@6YD4PSr|8;LT?M(B|?CUct zv3$_Tj1Xe#E*&yrS6c+#rI0T2NKiZ;6(K7pe)iI5x1~y6I5ACXP)&xyiyzo<-Veo@%+q`^;$NJXomZs zBqSR_WrE1(ik__e(Ei* zA|2Vgmy!k>@5%6bHAafN4Tw>sP#u!ny>lmwzGdb(Ly81e`%AM=Ox!1Z9p?dPNYKq} z6`TI!PN?YV9gNfr{5FWW+39H0^>_!*?KW43(6_A1XwduH+k6>7AI>`k(BsNVNnsgA zj3oA;trs&Fb7rqL;8?MjnqN`T0?FUWUs4qhy9K2&KU50DmqG(il*D5%JJOo@E!HMA zJzW3{QYF8w}!}izP$SW}wJUfXwU)4|jK9XdtD^C; zXV3hdZLjBTdE`~y+S_PbfF3g@Uuc4%Xbe_MkaOiv*1sDTX7A**m-=a0nF)AS=g+s; z7uD6(VR}wii=Bc^#2ACEv`z;oy+Ifxfn?$}rznVb8{P@^ZH3N*Z~}a?>#}ASjB^|C z+*5+w-zQI=;O9lNBj9k=(#$U9^~F=57yJ15n3{6v(rV9CmK~#p!AmX*opX$l8^{4+#dIVn%^In@2u0{ZiSg3U)N*q8g;+Kj5Tg3QyCj>f7}h0 zCf||xdiQ#>H{fV58 z2M<`w%;DtT!_WUj%*@N{ui|q0ULnaPZ=n!zP7XQ{9kL`}7bnnBb-j77YkTY{9dfA zE%?t*7hfLRoZSqm6Hq6XZY;~}mam(b;#3knK)a2Oj9B#cwXo=)PWpU%J1uRXNi6vu zgb=UiLUCPY?&|6q_*o3Xvbpc|;l3C63{N$coX%?huSdzk)#UJNatrbD=~MguM`H5H z)z+9Px$@sHPD>m8@$p9pmF5*u!Wz(g%!XQ+G}YCAjEzB-oYeGwY#~w;p7md``%j^N z4_k_{2s%6_CX3`-XYjmFPE6bm3xj0`Hl!54n_gpI-1FnD#csZ+)INTE&Y_&}XVuez zrwo%)t!MkO^u-XcG|?q%cA5FVWB+0_H2PQ`4_J{~OC$9l0it`I^yXvjSUelTQlBuQ?;XNJuDDGp)X^ zZY8T%pk7e%{su*|UY6lLxu_Tc1VBw(d-F!e_mi8$&oR!d4Bb;XMii>-fL}T}AsNS5 zYOgR5L`psxq2>O0}#=mre5wY33g+?~WfF8Lcc-uSG-!Wd+mjQ++~c|Gw68SZms zQBhI2qqQ?uGyTa~j??k_EB&Ad4FXAzjDUav>%!5BcAs{AXc5UFf*Y?&CZ9tB%X9dSl0w~c4Y~|;5%Sr z_T#sI*`DtYToalI|FLJ!Zs`yS82j>2GE%rMj<@bzx6e>py{aIqfi(=h3}>fRiI>l> zIy)ZjbV<%An6F!3PB|w*Mx4owYqGnc@OTzn!{19?*KVr|##q3{i~IcV z8hpuc==#7Vje@0uAvz&}UZuPtkTX&{cS%PbIZer>^P+zrGxDI+0wBPaejU9OndKU}4Z`^43I0~dMYxqW6MpQW4?PQZ#Qt(Lw%6hi26=rfQLK7TLNe@=)&AU4XaUZ-CZ!{sc0z1(+8 z9yyZCON|X_*aaXzEBXx$8^gYjds~!tDc^jDHQu3kW3%2g?*Y1tWqo)CjgEzt9()K5 zOW>5%aax9k6Q)i6-AmrJow?N`cEzjHQ~9B;avR)yPZt&%Ua@hxp+wrTV~3B%JV;XH zvWK`7Foxgg?-8S0sNcMmaT?rGmK(Ikxq`!AplbpTOU0A4YU?NM6H3QE0OvZ{2yZI7 z4=}|_MrQt3mdV!>mYiA_IsdS9D}JcvzC z5Aj>iO;EFuNMLi40S;u?W&B$8mgiTSAKzG>o}d=naP~`5J$@U*xDmMxuq#gNL*MjG z&6Bxbh9^%(r=;X}Ivld9STnTp-4z=6^QYb2hL}f>xR{uRD>rVB$gOFS^=i={{J_M& 
z8p6uN#HhDe{XqM~R&}DP5-R;w#T%=AK__x@}Te#{R;HVfpU4zS#rA87~;4z6DK ztdrpJTUKaEQxW1|*GCN_>o;d-^XPLe^Wmk&0S~AKT(D2)+l-&*qUXilPDSh$iHL}h zVghQxy47C4q}Qh8kU(V0Y2VhVYu`Xd`TQB^m;$*=GkxotX*uQntjx@kSAXit%_I4^ zg0+J1Us>_Q$UOSI$`QD;cL5l~H7N-Tjcoy1$Ee&>*aWoNpm`Z}OVDK%;XES1dn|06 zlPbmU*UEhUs`djWy+29Wi_K zNBsms z3yrgJ_fw5vvp1|HZds!84JW!uUesW=Hkq*v{@eMK^BTU%eBqeOx=6E&eC z8sAV?SEW^0J7Qe!TU>7gHY_F{nSx9dxIuTMF_$R3y+9opOuC1O%)PKVfk{y=mx^# z1DQ0^lcy#pqZ>!z@W$$T_UwPW?u18=6fS&Xd3g7fx%s=bfl;&OD+f=-Gf@QCus=#U z+^-+lFQ$yP!-Xj_f>)w#;+mRb*(J4MoM_{;N8!xGvj|mls?T_}?>}8RCFcPJt_En& zJZ2OKA}oTXzF`66O`p!tk&%WYliajQGiD!0J!t)w+kV7bzi%X1{G_$@vmjI)M#4+W z?ZE$)mnS!q$R?}#u{eKxm)m{M10#9KP|2~L>5E{|#70Ns*h_NN?(s2Si3v?f58hl` z{BtPrycj!tqxH4`VNW2e{r26v&PF)|v0&Hy^@f7EXbHxC85#9gO2~+cfBGh8G>7MR zXzRSyf3y;>&hHx2?iat&{BP!8j}y9SuoucC=|%rIm&8-}_7^HT7ttx^2b_YR?Ig|9 zEfNYO!egZUHr8O*6i@gJ-!&G*geJT0i>}!!QH-uvKa72mHZOOeY5Pq@pmbwRPmho| zW?bnpn-R%b>t|v~^@7d(seEHg4T4)f(GJ*P2#Cg&Ukc6m7+g#^ry>iB7(#-5^~WNR zECmHMMOaHpO1iqbgaibTCW1CD<(RH0vm70NxAK^~exekV^Z+=KB49C9J(<%n>V~j6 ze90IV8=j^l#)S_$Nhf^hHikss#@OAluV|me-@U7QRpvGNvyJllXKV^5 zC{A<@k9Hqct82QyvDiLgpxi%}X;4#(3Jy)e8ozt2O!r^gQnJ#N4e*gId+w9^CZ8s5 z-FMdIwhB%$x7Bw%dQEfG3fBpu1kDa;V6u%Iz}a`^VGTcRX?dlv$GXr2`@^UkH~m)K zunS`B*r^QT>}LAP2!Ch>?Y*Rkq0aUEjfs{N7p_p9%qu1T%1$+Fh^KKV*{{qu@C>uG zPc31G;wFU7MxxK zA|Jne!9>5({TBx`mocb~!9;nO6Z`NXb(e)R4@#+F7wgaYuU{kW9eN0^G8YQMTHFk` z-&nKrrAr^1nsAFB9f)AWy34NCeKKb?E2TquokeVS!hP3eQNC!)Ll;!UStxD^Xr&yt z5yMfoB6=HC*bo7}ljcg0b+){CQ7BjqfhI&=o0;U38jAyr|fU!vTg&Fco2f52%&w z=MHXm*j!06e^toYyQY#QF3`}OMq7!l7G56hye)*3bn%(nNkM9=+(x+*WeLNU)!!DU>1p-MV|#lj6MXN23_UdC8|*t2%ZB zt5w?-rf!%(FoinSH;UqgC>p>BbTj^}gn_`&pBwmuP|ZpuLYZD4U4Lk_yv|z8X9Ff~ z&tre1IF9PY3QYd^h_EPEJl(vEx{oD->upJ}fj4RAni8co)8=hGJoFtL<{uoSh!C?W1@$PN^PZv*m?pHH?Ui`w(PVzEJ$;fC6L_-$sAu>Mss$MVhTwxC?CS5^E zdHGd${$K$u8ENT{qrXc(mzr>1?D1Rq+lo*o7(1wIbUew3*~+fD3OkRg0)^daxtA3q zWzN&4k8?(`OJvhgEye%6VOSH(^|&pCFY9ju&s$`b-Rxu?bx(LyshJS1&MPPiyeI68hk{Y)5M zhs#pZns@JK^WgxCKb@lxr~4Mk>_HwD8F`_w2XYUZa<$8C1I*VA+_l{;EMOD^84c)d zHJkO)tku+5&v_&5YuBW-qS3E}K;FJ+f;N*bm5riyj2iMnE8nHl!D{!TqkkiC8&gB7 zX%)sgq)J1;Ht1H?2ZM@f%>XO-&QK zrq(-wzFAvn_FJ9E!oT>56BGltyp%30{|v!ZrVvq^)5~4@3WNQy2wk6{0Dn~nV|L$h z?by|{o6V0$nw1B~GGlLkQezAwRzLsYq60&C)QwVhZHgI$2=3HYdrU53gbX12+SXMc zfR32$xpRfDUO@saAt7-_*KC;5@|M8yXaw#BddX_`m5%g94W?`=D*Pz_w(KKxccgy4 z&GRKQpJ8#q&CC=0=e{Jv*E7B-Mn`UA zLsw@0y4WG@%~A3UZJZVln+5Z6qTwBK$e;$>X;+KU^D%lffv%r9!^f=d#q01?fmvl{@i;y$bR4D z7#Kzk@|H=mO)fSbI_tA~X-+O!->Sc*D*SHcddUj*udh#^-jqGp{V9>d)R0d`M>u1< zC=-4EiNLyHmzIgefVYSGBTM_QSkZjeg9iDR`wgEKJ^CqXa*BMk(o$k#+O6DBn}dV_ z%h{6m=BjU;c4uoV9}7);Tid&!W&6$(@eQ3lJ%ANh&yrj_^v}~wdbzl{k+K9Pvbs97 zg2FMMLVA@Ru8>8IV4Nf&v7=TjSs#&Z@?90*H0Ca=IQVbBprbP(^+nR85i^O8wgqtV z$sBYtb+_YjdqSc0`G())Of&NMt2T=KTG6aYJ!-`juNlHbe0?;#7Al7?8US52JUrau zNvI%aH^M4J>`IJH*%4H}Lb?wL9%dGr3!h^5L#(?I)$+^zK%92UkRfKGI8zP>DCKbj z)4|C}Yj&I=^nMtl#OUUF<%gmof7P^;CL8k&H|)NoQ~k}NPIdYDcGgz4{%A<<*?&_` zy{_|51!T_-7~J0_D=OMeP}t7(C9+WvVa}QJXSB6H5qoJW^vK4L?69bjhSo#0)@DN4 zv(8DZ`d6GiAe8a&SjKLFCLv1-)FDE7LY4Xd9@h?I0*xEzZ9fue!|R`(%=xwJ`4xjC z#v;mZD&!UlQ<)F+l1yc)??0T)FzJYh-Nwuwk*?EGdsmZi@9@$y9PJylm9-^@--V7Re&=U)2! zb4S;y2ttfN1~|^)^PbeV?2`sY-aEUvg~>d%p#A3e=Bvl^{+fAHxdDGw#efqO*9A`K zFsjS3e&K3>p@2?sM6EY?bNa^uA4ipqi>2kNM5IJ^k}%#oq-5}AUov6|key-)W|x0O zb4zdawB;tZr^4gPMmnXIW9sTb#v)4BCu5xj5E17;XfL2OVAt8m&KJver^#~ht{(+M zjNFw{@8ZgiQ#KBh+|&oC0%x*SF>SlQhl|cvG?M`Y_O!Sn8?0?3Zen9&!$Jea&1{0? 
zfkzWb`=i-Iytd7DWmp|i)F+6YhOYi*0Xs)*uM9nYAmy}hD(OOv%5~?v?X_dSt6RiH zJT9uK-TJRbIX45=vvILQ+$KHB3Sr@Asi|b5H<0;PvvVORCHARKjD}>A2 zp}dSoKfH`JP&pngm7oigB$43)c0^V?dD~MLt`cLBb&aijvZs|=dwXvnvZVVkhqxHH z@S)NN!vYRtC?Lb55kE6 zA1zFLyIT-6)B)`6bmv}ueu<8R0*YGIXVdT@0w0`{o$w`+SL-*L29)a`qWRYLA3TWa z<(ONVECLv#YKM7K>p0PI9%yeT`4t_GYRbzb{lZos*eq;{6Y;lV+3ZvA%(%fv++6nC z6pEn~reC~`-DG^6{l}McT*ao5JJzU<>gXUnhf|IXqc-5Hn3$N32akndvYOk;f%z8c zRRQ`bDx%1JuzXBSt)?nDrn-NM8gdl;;5%AoP4KHq#HD+C)N%3a9#S1v`@jFfZc%ny zt?N5f)j+2r2rAG(iX-rzKu>pfcB}pWOwNxweffa!iAP5~USKs77k7n7FIDE^52ONG zoH_HgwUue-&eMj5{?|;Lk6*o7XqqRwZ=Xuf2VY>EA&wZikq{dzWwd`LP2qB^z(b-am$8$K;; z{)gVpsbz29HE~bC#K`Cx9-`dq`;t2Dx*da?(KlBd_-=lFwcO5-uL5=1HW1li0Jy(x zJ1D1HSw)P+Zmz9BKBu(YcNNzw2wvTW;8j&H~9A5g7t^v~XmfA|o9Upb>J z?$LtnTn+292ccnG@;++MVA}zY|2qH!fHx^Les0(+;f8ZFELZs2vDuhoC3tlBs+v>l z@gI@01D?<@!g7UiCaZ&)rj%_#X+{QREwHayDL3EXLB#^YcX(mnCfN>d?q6oK!6$D5 za9SkDr%8rB9Lb^dXQn?XC54CdpJD2U*bs}(Z_>_rsXfy$Npc4sa9qR$?Tr6OxHwyE zA>fz{^;TNO`6<8>DrDRGI~a%tR*MN-+&wB;bNK4dwdTz=231BD8d3+2Sx}eM4oWhw z@bQ{x%YbZACH!e%R&FjB2f-NSLQ@9nt?XK_wHWTVtS3+4%nteb?7o4924#3X$1_^# zPlcL_yA1&DLb+0Osa1(BOOSb_*3^Bz*X4CUC0zSfF&@kJ|=iVDX7PiJ+rlwDaV7>&6OJV$#o9 zR3ueySqWBy?4ZxWXuQ?#Xvw?3ckw0bLxevKl{nbXaKzy1bCMnzrPgr76 zjc~!DjKdK`T&UR5pU7@f<$ceO&MhO~2u79vko%0XQYWh&i!b@r3p0o25)m5sU%UWOo(+7-MZGMP#Cg#I>ts)xeL|56$iBV!Od?cL9bJ zjy$DCP%uG9+(9tx5%p=`!9~|7XGczx8a{m5TSG@EH zKqq)&Vm{>vRm4NxOvusU+n<5Ar*RafS+<=!Nrr6(Y{dYrVAzFi0#kSnnWXhPW`m$1 z4n9T?x*Y*3L0h`bZvd@4w^x-B$6;`}76->rWh5*6-AoBhA;pv<#C@m{bBN6J^p)8; z0Gj`cv|8l^T@5MmC?MTGG&PkY#L9(hIZ^=+7ml$NOANPF`nEsW#kWPAA3Q6F^+0SE z6%kq7j$Ih@`A^Hx8{S;*_dxLp_6HX|p>pa}`!A`#W~DBCF&~5cI3wSq8A&Q-Uffp$ z*3dwC9d>$$#$fh((p8V3ws!xF$H;g6lP@a&R6`}){#{mnijeUiH~>Ha_Fcli$FU+= zHuBD$-|IMsYDKSsHp9T9G+hZr>h#0}Ov@z;n$&g2NgJ%KI#C$#=O8 z{9CMwf1uny!iCV!KkNNM7>N@@*bnxc$(6R|p&3=4a03sUn9EXLpIGX$s;qDJSLsWB z^85r|?OtLc>LaN62glCfLKpUsoQQj1vRoPrXtJxVKgomLkMPH4HFzAYID!gD9Eg$8 zfBr6abK&P6Mh|h)BerLYaNO#bBeS4YzOXDe%`w6RJx&`kk{38RY#|WTgYO2$n@)%8 z&d#%EQ5Aj^N?GA~eE&X@dEQ`ALp(I1X32?6-2@4&3|R4x<`_Y-i^Kg}IAb}x;V+hf z^XIv70p>;DhFws4`Ybfbf{gri&es8-gmKc5o5D6?$31BpByc3D^UwP#HbinRgQ{As zC#3%fV+a>>!5H7o^<}~z?^vA&W@5s{#{oysP_3!vxw%62qI3%Z;Mvu=1{B&Ez0}%t67)jll7saO=~@Q z>eSYR_Y-bq@ILf7x}QAR#=D(Y>s4xMwWAFD$7Sa9=OU@=_WhrViS#I9)%*Wg?EcSk b_;>Synz&$td)g4>euS2WzWNI_>s$W|g1c0j literal 0 HcmV?d00001 diff --git a/src/EvoTrees.jl b/src/EvoTrees.jl index 9eaa9eea..ab59633a 100644 --- a/src/EvoTrees.jl +++ b/src/EvoTrees.jl @@ -1,7 +1,7 @@ module EvoTrees export init_evotree, grow_evotree!, grow_tree, fit_evotree, predict -export EvoTreeRegressor, EvoTreeCount, EvoTreeClassifier, EvoTreeGaussian, +export EvoTreeRegressor, EvoTreeCount, EvoTreeClassifier, EvoTreeMLE, EvoTreeGaussian, importance, Random using Base.Threads: @threads diff --git a/src/MLJ.jl b/src/MLJ.jl index 7599c2c5..3ecc1bb3 100644 --- a/src/MLJ.jl +++ b/src/MLJ.jl @@ -65,6 +65,16 @@ function predict(::EvoTreeGaussian, fitresult, A) return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] end +function predict(::EvoTreeMLE{L,T,S}, fitresult, A) where {L<:GaussianDist,T,S} + pred = predict(fitresult, A.matrix) + return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] +end + +function predict(::EvoTreeMLE{L,T,S}, fitresult, A) where {L<:LogisticDist,T,S} + pred = predict(fitresult, A.matrix) + @info pred + return [Distributions.Logistic(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] +end # Feature Importances MMI.reports_feature_importances(::Type{<:EvoTypes}) = true @@ -76,10 +86,11 @@ end # Metadata -const EvoTreeRegressor_desc = 
"Regression models with various underlying methods: least square, quantile, logistic, gamma, tweedie." +const EvoTreeRegressor_desc = "Regression models with various underlying methods: least square, quantile, logistic, gamma (deviance), tweedie (deviance)." const EvoTreeClassifier_desc = "Multi-classification with softmax and cross-entropy loss." -const EvoTreeCount_desc = "Poisson regression fitting Ξ» with max likelihood." -const EvoTreeGaussian_desc = "Gaussian maximum likelihood of ΞΌ and Οƒ." +const EvoTreeCount_desc = "Poisson regression fitting Ξ» with deviance minimization." +const EvoTreeGaussian_desc = "Deprecated - Use EvoTreeMLE with `loss=:normal` instead. Gaussian maximum likelihood of ΞΌ and Οƒ." +const EvoTreeMLE_desc = "Maximum likelihood methods supporting Normal/Gaussian and Logistic distributions." MMI.metadata_pkg.((EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount, EvoTreeGaussian), name="EvoTrees", @@ -117,6 +128,13 @@ MMI.metadata_model(EvoTreeGaussian, path="EvoTrees.EvoTreeGaussian", descr=EvoTreeGaussian_desc) +MMI.metadata_model(EvoTreeMLE, + input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, + target_scitype=AbstractVector{<:MMI.Continuous}, + weights=false, + path="EvoTrees.EvoTreeMLE", + descr=EvoTreeMLE_desc) + """ EvoTreeRegressor(;kwargs...) @@ -173,6 +191,12 @@ Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of siz EvoTrees.predict(model, X) ``` +Alternatively, models act as a functor, returning predictions when called as a function with features as argument: + +```julia +model(X) +``` + # MLJ Interface From MLJ, the type can be imported using: @@ -280,6 +304,12 @@ Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of siz EvoTrees.predict(model, X) ``` +Alternatively, models act as a functor, returning predictions when called as a function with features as argument: + +```julia +model(X) +``` + # MLJ From MLJ, the type can be imported using: @@ -395,6 +425,12 @@ Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of siz EvoTrees.predict(model, X) ``` +Alternatively, models act as a functor, returning predictions when called as a function with features as argument: + +```julia +model(X) +``` + # MLJ From MLJ, the type can be imported using: @@ -475,7 +511,7 @@ EvoTreeCount EvoTreeGaussian(;kwargs...) A model type for constructing a EvoTreeGaussian, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API the MLJ model interface. -EvoTreeGaussian is used to perform Gaussain probabilistic regression, fitting ΞΌ and Οƒ parameters to maximize likelihood. +EvoTreeGaussian is used to perform Gaussian probabilistic regression, fitting ΞΌ and Οƒ parameters to maximize likelihood. # Hyper-parameters @@ -516,6 +552,12 @@ Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of siz EvoTrees.predict(model, X) ``` +Alternatively, models act as a functor, returning predictions when called as a function with features as argument: + +```julia +model(X) +``` + # MLJ From MLJ, the type can be imported using: @@ -594,6 +636,140 @@ preds = predict_median(mach, X) """ EvoTreeGaussian + + +""" + EvoTreeMLE(;kwargs...) + +A model type for constructing a EvoTreeMLE, based on [EvoTrees.jl](https://github.com/Evovest/EvoTrees.jl), and implementing both an internal API the MLJ model interface. +EvoTreeMLE performs maximum likelihood estimation. 
+The assumed distribution is specified through the `loss` kwarg. Both Normal/Gaussian and Logistic distributions are supported.
+
+# Hyper-parameters
+
+- `loss=:gaussian`: Loss to be minimized during training. One of:
+  - `:normal` / `:gaussian`
+  - `:logistic`
+- `nrounds=10`: Number of rounds. It corresponds to the number of trees that will be sequentially stacked.
+- `lambda::T=0.0`: L2 regularization term on weights. Must be >= 0. Higher lambda can result in a more robust model.
+- `gamma::T=0.0`: Minimum gain improvement needed to perform a node split. Higher gamma can result in a more robust model.
+- `max_depth=5`: Maximum depth of a tree. Must be >= 1. A tree of depth 1 is made of a single prediction leaf.
+  A complete tree of depth N contains `2^(N - 1)` terminal leaves and `2^(N - 1) - 1` split nodes.
+  Compute cost is proportional to 2^max_depth. Typical optimal values are in the 3 to 9 range.
+- `min_weight=1.0`: Minimum weight needed in a node to perform a split. Matches the number of observations by default or the sum of weights as provided by the `weights` vector.
+- `rowsample=1.0`: Proportion of rows that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
+- `colsample=1.0`: Proportion of columns / features that are sampled at each iteration to build the tree. Should be in `]0, 1]`.
+- `nbins=32`: Number of bins into which each feature is quantized. Buckets are defined based on quantiles, hence resulting in equal weight bins.
+- `monotone_constraints=Dict{Int, Int}()`: Specify monotonic constraints using a dict where the key is the feature index and the value the applicable constraint (-1=decreasing, 0=none, 1=increasing).
+  !Experimental feature: note that for Gaussian regression, constraints may not be enforced systematically.
+- `rng=123`: Either an integer used as a seed to the random number generator or an actual random number generator (`::Random.AbstractRNG`).
+- `metric::Symbol=:none`: Metric that is to be tracked during the training process. One of: `:none`, `:gaussian`, `:logistic`.
+- `device="cpu"`: Hardware device to use for computations. Can be either `"cpu"` or `"gpu"`.
+
+# Internal API
+
+Do `params = EvoTreeMLE()` to construct an instance with default hyper-parameters.
+Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeMLE(max_depth=...)`.
+
+## Training model
+
+A model is built using [`fit_evotree`](@ref):
+
+```julia
+fit_evotree(params; x_train, y_train, w_train=nothing, kwargs...)
+```
+
+## Inference
+
+Predictions are obtained using [`predict`](@ref) which returns a `Matrix` of size `[nobs, nparams]` where the second dimension refers to `ΞΌ` & `Οƒ` for Normal/Gaussian and `ΞΌ` & `s` for Logistic.
+
+```julia
+EvoTrees.predict(model, X)
+```
+
+Alternatively, models act as a functor, returning predictions when called as a function with features as argument:
+
+```julia
+model(X)
+```
+
+# MLJ
+
+From MLJ, the type can be imported using:
+
+```julia
+EvoTreeMLE = @load EvoTreeMLE pkg=EvoTrees
+```
+
+Do `model = EvoTreeMLE()` to construct an instance with default hyper-parameters.
+Provide keyword arguments to override hyper-parameter defaults, as in `EvoTreeMLE(loss=...)`.
+
+## Training data
+
+In MLJ or MLJBase, bind an instance `model` to data with
+
+    mach = machine(model, X, y)
+
+where
+
+- `X`: any table of input features (eg, a `DataFrame`) whose columns
+  each have one of the following element scitypes: `Continuous`,
+  `Count`, or `<:OrderedFactor`; check column scitypes with `schema(X)`
+
+- `y`: is the target, which can be any `AbstractVector` whose element
+  scitype is `<:Continuous`; check the scitype
+  with `scitype(y)`
+
+Train the machine using `fit!(mach, rows=...)`.
+
+## Operations
+
+- `predict(mach, Xnew)`: returns a vector of distributions (`Normal` or `Logistic`, according to `loss`) given features `Xnew` having the same scitype as `X` above.
+Predictions are probabilistic.
+
+Specific metrics can also be predicted using:
+
+  - `predict_mean(mach, Xnew)`
+  - `predict_mode(mach, Xnew)`
+  - `predict_median(mach, Xnew)`
+
+## Fitted parameters
+
+The fields of `fitted_params(mach)` are:
+
+  - `:fitresult`: The `GBTree` object returned by EvoTrees.jl fitting algorithm.
+
+## Report
+
+The fields of `report(mach)` are:
+  - `:features`: The names of the features encountered in training.
+
+# Examples
+
+```
+# Internal API
+using EvoTrees
+params = EvoTreeMLE(max_depth=5, nbins=32, nrounds=100)
+nobs, nfeats = 1_000, 5
+X, y = randn(nobs, nfeats), rand(nobs)
+model = fit_evotree(params; x_train=X, y_train=y)
+preds = EvoTrees.predict(model, X)
+```
+
+```
+# MLJ Interface
+using MLJ
+EvoTreeMLE = @load EvoTreeMLE pkg=EvoTrees
+model = EvoTreeMLE(max_depth=5, nbins=32, nrounds=100)
+X, y = @load_boston
+mach = machine(model, X, y) |> fit!
+preds = predict(mach, X)
+preds = predict_mean(mach, X)
+preds = predict_mode(mach, X)
+preds = predict_median(mach, X)
+```
+"""
+EvoTreeMLE
+
 # function MLJ.clean!(model::EvoTreeRegressor)
 #     warning = ""
 #     if model.nrounds < 1
diff --git a/src/eval.jl b/src/eval.jl
index 87872292..9db5f99d 100644
--- a/src/eval.jl
+++ b/src/eval.jl
@@ -86,6 +86,15 @@ function eval_metric(::Val{:gaussian}, p::AbstractMatrix{T}, y::AbstractVector{T
     return eval
 end
 
+function eval_metric(::Val{:logistic}, p::AbstractMatrix{T}, y::AbstractVector{T}, w::AbstractVector{T}, alpha=0.0) where {T<:AbstractFloat}
+    eval = zero(T)
+    @inbounds for i in eachindex(y)
+        eval += -w[i] * (log(1 / 4 * sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2) - p[2, i])
+    end
+    eval /= sum(w)
+    return eval
+end
+
 function eval_metric(::Val{:quantile}, p::AbstractMatrix{T}, y::AbstractVector{T}, w::AbstractVector{T}, alpha=0.0) where {T<:AbstractFloat}
     eval = zero(T)
     for i in eachindex(y)
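The quantity this `:logistic` metric accumulates is the negative log-likelihood of a `Logistic(ΞΌ, s)` distribution, with `s` tracked on the log scale as `p[2, i]`. A quick sanity check against Distributions.jl (the values below are arbitrary):

```julia
using Distributions

ΞΌ, logs, y = 0.3, -0.2, 1.1
s = exp(logs)

# per-observation term as accumulated by eval_metric(::Val{:logistic}, ...)
nll_metric = -(log(1 / 4 * sech((y - ΞΌ) / (2 * s))^2) - logs)

# reference: negative log-density of the logistic distribution
nll_ref = -logpdf(Logistic(ΞΌ, s), y)

nll_metric β‰ˆ nll_ref    # true
```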
diff --git a/src/find_split.jl b/src/find_split.jl
index 62302566..cc82df7d 100644
--- a/src/find_split.jl
+++ b/src/find_split.jl
@@ -121,7 +121,7 @@ end
 
 """
     update_hist!
-        GaussianRegression
+        MLE2P
 """
 function update_hist!(
     ::Type{L},
     hist::Vector{Vector{T}},
     δ𝑀::Matrix{T},
     X_bin::Matrix{UInt8},
     𝑖::AbstractVector{S},
-    𝑗::AbstractVector{S}, K) where {L<:GaussianRegression,T,S}
+    𝑗::AbstractVector{S}, K) where {L<:MLE2P,T,S}
 
     @threads for j in 𝑗
         @inbounds @simd for i in 𝑖
@@ -246,9 +246,9 @@ end
 
 """
     hist_gains_cpu!
-        GaussianRegression
+        MLE2P
 """
-function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L<:GaussianRegression,T}
+function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L<:MLE2P,T}
     @inbounds for bin in 1:params.nbins
         i = 5 * bin - 4
         # update gain only if there's non null weight on each of left and right side - except for nbins level, which is used as benchmark for split criteria (gain if no split)
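The `i = 5 * bin - 4` stride reflects the histogram layout used by the two-parameter (`MLE2P`) losses: each bin packs five accumulators, two first-order gradients, two diagonal second-order terms, and the weight. A small indexing sketch (the names here are hypothetical, for illustration only):

```julia
# Sketch of the flat 5-slots-per-bin layout assumed by the MLE2P code paths.
nbins = 32
hist = zeros(Float64, 5 * nbins)

bin = 7
i = 5 * bin - 4                   # first slot of this bin
g1, g2 = hist[i], hist[i+1]       # βˆ‚loss/βˆ‚ΞΌ and βˆ‚loss/βˆ‚log(Οƒ) sums
h1, h2 = hist[i+2], hist[i+3]     # matching second-order sums
w = hist[i+4]                     # total weight falling into the bin
```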
diff --git a/src/fit.jl b/src/fit.jl
index 762ef05f..92a3bf8d 100644
--- a/src/fit.jl
+++ b/src/fit.jl
@@ -13,7 +13,7 @@ function init_evotree(params::EvoTypes{L,T,S}, X::AbstractMatrix, Y::AbstractVec
         Y = T.(Y)
         ΞΌ = [logit(mean(Y))]
         !isnothing(offset) && (offset .= logit.(offset))
-    elseif L ∈ [Poisson, Gamma, Tweedie]
+    elseif L in [Poisson, Gamma, Tweedie]
         Y = T.(Y)
         ΞΌ = fill(log(mean(Y)), 1)
         !isnothing(offset) && (offset .= log.(offset))
@@ -31,15 +31,21 @@ function init_evotree(params::EvoTypes{L,T,S}, X::AbstractMatrix, Y::AbstractVec
             Y = UInt32.(CategoricalArrays.levelcode.(yc))
         end
         !isnothing(offset) && (offset .= log.(offset))
-    elseif L == Gaussian
+    elseif L == GaussianDist
         K = 2
         Y = T.(Y)
         ΞΌ = [mean(Y), log(std(Y))]
         !isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
+    elseif L == LogisticDist
+        K = 2
+        Y = T.(Y)
+        ΞΌ = [mean(Y), log(std(Y) * sqrt(3) / Ο€)]
+        !isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2]))
     else
         Y = T.(Y)
         ΞΌ = [mean(Y)]
     end
+    ΞΌ = T.(ΞΌ)
 
     # force a neutral bias/initial tree when offset is specified
     !isnothing(offset) && (ΞΌ .= 0)
@@ -229,7 +235,7 @@ Main training function. Performs model fitting given configuration `params`, `x_
 
 # Arguments
 
-- `params::EvoTypes`: configuration info providing hyper-paramters. `EvoTypes` comprises EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount or EvoTreeGaussian
+- `params::EvoTypes`: configuration info providing hyper-parameters. `EvoTypes` comprises EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount and EvoTreeMLE.
 
 # Keyword arguments
 
@@ -241,9 +247,21 @@ Main training function. Performs model fitting given configuration `params`, `x_
 - `y_eval::Vector`: vector of evaluation targets of length `#observations`.
 - `w_eval::Vector`: vector of evaluation weights of length `#observations`. Defaults to `nothing` (assumes a vector of 1s).
 - `offset_eval::VecOrMat`: evaluation data offset. Should match the size of the predictions.
+- `metric`: The evaluation metric that will be tracked on `x_eval`, `y_eval` and optionally `w_eval` / `offset_eval` data.
+    Supported metrics are:
+
+    - `:mse`: mean-squared error. Adapted for general regression models.
+    - `:rmse`: root-mean-squared error (CPU only). Adapted for general regression models.
+    - `:mae`: mean absolute error. Adapted for general regression models.
+    - `:logloss`: Adapted for `:logistic` regression models.
+    - `:mlogloss`: Multi-class cross entropy. Adapted to `EvoTreeClassifier` classification models.
+    - `:poisson`: Poisson deviance. Adapted to `EvoTreeCount` count models.
+    - `:gamma`: Gamma deviance. Adapted to regression problems on Gamma-like, positively distributed targets.
+    - `:tweedie`: Tweedie deviance. Adapted to regression problems on Tweedie-like, positively distributed targets with probability mass at `y == 0`.
 - `early_stopping_rounds::Integer`: number of consecutive rounds without metric improvement after which fitting in stopped.
 - `print_every_n`: sets at which frequency logging info should be printed.
 - `verbosity`: set to 1 to print logging info during training.
+- `fnames`: the names of the `x_train` features. If provided, should be a vector of strings with `length(fnames) = size(x_train, 2)`.
 """
 function fit_evotree(params::EvoTypes{L,T,S};
     x_train::AbstractMatrix, y_train::AbstractVector, w_train=nothing, offset_train=nothing,
@@ -264,10 +282,11 @@ function fit_evotree(params::EvoTypes{L,T,S};
         L == Logistic && (offset_eval .= logit.(offset_eval))
         L in [Poisson, Gamma, Tweedie] && (offset_eval .= log.(offset_eval))
         L == Softmax && (offset_eval .= log.(offset_eval))
-        L == Gaussian && (offset_eval[:, 2] .= log.(offset_eval[:, 2]))
+        L in [GaussianDist, LogisticDist] && (offset_eval[:, 2] .= log.(offset_eval[:, 2]))
         offset_eval = T.(offset_eval)
     end
 
+    !isnothing(metric) && (metric = Symbol(metric))
     if !isnothing(metric) && !isnothing(x_eval) && !isnothing(y_eval)
         if params.device == "gpu"
             x_eval = CuArray(T.(x_eval))
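Putting the expanded keyword API together, training with a tracked evaluation metric looks roughly as follows; a minimal sketch with made-up data (names and values are illustrative):

```julia
using EvoTrees

params = EvoTreeMLE(loss=:gaussian, nrounds=200, eta=0.05, max_depth=5)

nobs, nfeats = 2_000, 5
x_train, y_train = randn(nobs, nfeats), randn(nobs)
x_eval, y_eval = randn(500, nfeats), randn(500)

# the metric is evaluated on the eval set; fitting stops after 20 stale rounds
model = fit_evotree(params;
    x_train, y_train,
    x_eval, y_eval,
    metric=:gaussian,
    early_stopping_rounds=20,
    print_every_n=50)

pred = EvoTrees.predict(model, x_eval)   # [nobs, 2]: ΞΌ in column 1, Οƒ in column 2
```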
index e7557661..ec0672d1 100644
--- a/src/gpu/predict_gpu.jl
+++ b/src/gpu/predict_gpu.jl
@@ -52,9 +52,9 @@ end
 
 """
     predict_kernel!
-        GaussianRegression
+        MLE2P
 """
-function predict_kernel!(::Type{L}, pred::AbstractMatrix{T}, split, feat, cond_float, leaf_pred::AbstractMatrix{T}, X::CuDeviceMatrix, K) where {L<:GaussianRegression,T}
+function predict_kernel!(::Type{L}, pred::AbstractMatrix{T}, split, feat, cond_float, leaf_pred::AbstractMatrix{T}, X::CuDeviceMatrix, K) where {L<:MLE2P,T}
     idx = threadIdx().x + (blockIdx().x - 1) * blockDim().x
     nid = 1
     @inbounds if idx <= size(pred, 2)
@@ -97,7 +97,7 @@ function predict(model::GBTreeGPU{L,T,S}, X::AbstractMatrix) where {L,T,S}
         pred .= sigmoid.(pred)
     elseif L ∈ [Poisson, Gamma, Tweedie]
         pred .= exp.(pred)
-    elseif L == Gaussian
+    elseif L == GaussianDist
         pred[2, :] .= exp.(pred[2, :])
     elseif L == Softmax
         pred = transpose(reshape(pred, model.K, :))
@@ -119,12 +119,12 @@ function pred_scalar_gpu!(βˆ‘::AbstractVector{T}, lambda) where {L<:GradientRegr
     @allowscalar(-βˆ‘[1] / (βˆ‘[2] + lambda * βˆ‘[3]))
 end
 
-# prediction in Leaf - Gaussian
-function pred_leaf_gpu!(p::AbstractMatrix{T}, n, βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}) where {L<:GaussianRegression,T,S}
+# prediction in Leaf - MLE2P
+function pred_leaf_gpu!(p::AbstractMatrix{T}, n, βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}) where {L<:MLE2P,T,S}
     @allowscalar(p[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]))
     @allowscalar(p[2, n] = -params.eta * βˆ‘[2] / (βˆ‘[4] + params.lambda * βˆ‘[5]))
     return nothing
 end
 
-function pred_scalar_gpu!(βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}) where {L<:GradientRegression,T,S}
+function pred_scalar_gpu!(βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}) where {L<:MLE2P,T,S}
     @allowscalar(-params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]))
 end
\ No newline at end of file
diff --git a/src/importance.jl b/src/importance.jl
index 4d47152e..e90080dc 100644
--- a/src/importance.jl
+++ b/src/importance.jl
@@ -10,6 +10,7 @@ end
     importance(model::GBTree)
 
 Sorted normalized feature importance based on loss function gain.
+Feature names associated with the model are stored in `model.info[:fnames]` as a string `Vector` and can be updated at any time, e.g.: `model.info[:fnames] = new_fnames_vec`.
 """
 function importance(model::Union{GBTree,GBTreeGPU})
     fnames = model.info[:fnames]
diff --git a/src/loss.jl b/src/loss.jl
index f195aba4..74da719b 100644
--- a/src/loss.jl
+++ b/src/loss.jl
@@ -79,14 +79,31 @@ end
 # Gaussian - http://jrmeyer.github.io/machinelearning/2017/08/18/mle.html
 # pred[i][1] = ΞΌ
 # pred[i][2] = log(Οƒ)
-function update_grads!(::Type{Gaussian}, δ𝑀::Matrix, p::Matrix, y::Vector; kwargs...)
+function update_grads!(::Type{GaussianDist}, δ𝑀::Matrix, p::Matrix, y::Vector; kwargs...)
+    Ο΅ = eltype(p)(2e-7)
     @inbounds @simd for i in eachindex(y)
         # first order
         δ𝑀[1, i] = (p[1, i] - y[i]) / exp(2 * p[2, i]) * δ𝑀[5, i]
         δ𝑀[2, i] = (1 - (p[1, i] - y[i])^2 / exp(2 * p[2, i])) * δ𝑀[5, i]
         # second order
-        δ𝑀[3, i] = δ𝑀[5, i] / exp(2 * p[2, i])
-        δ𝑀[4, i] = 2 * δ𝑀[5, i] / exp(2 * p[2, i]) * (p[1, i] - y[i])^2
+        δ𝑀[3, i] = max(Ο΅, δ𝑀[5, i] / exp(2 * p[2, i]))
+        δ𝑀[4, i] = max(Ο΅, δ𝑀[5, i] * 2 / exp(2 * p[2, i]) * (p[1, i] - y[i])^2)
     end
 end
 
+# LogisticDist - https://en.wikipedia.org/wiki/Logistic_distribution
+# pdf = exp(-(y - ΞΌ) / s) / (s * (1 + exp(-(y - ΞΌ) / s))^2)
+# pred[i][1] = ΞΌ
+# pred[i][2] = log(s)
+function update_grads!(::Type{LogisticDist}, δ𝑀::Matrix, p::Matrix, y::Vector; kwargs...)
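# --- editor's note (illustrative sketch, not part of the patch) --------------
# The terms below are derivatives of the negative log-likelihood of
# Logistic(ΞΌ, s), with ΞΌ = p[1, i] and s = exp(p[2, i]). A minimal sketch to
# sanity-check the first-order terms against automatic differentiation;
# Distributions.jl and ForwardDiff.jl are assumptions of this sketch, not
# dependencies introduced by the patch:
#
#   using Distributions, ForwardDiff
#   ΞΌ, logs, y = 0.3, -0.2, 1.1
#   nll(m, t) = -logpdf(Logistic(m, exp(t)), y)
#   ForwardDiff.derivative(m -> nll(m, logs), ΞΌ) β‰ˆ
#       -tanh((y - ΞΌ) / (2 * exp(logs))) * exp(-logs)                  # δ𝑀[1, i] per unit weight
#   ForwardDiff.derivative(t -> nll(ΞΌ, t), logs) β‰ˆ
#       -(exp(-logs) * (y - ΞΌ) * tanh((y - ΞΌ) / (2 * exp(logs))) - 1)  # δ𝑀[2, i] per unit weight
# ------------------------------------------------------------------------------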
+ Ο΅ = eltype(p)(2e-7) + @inbounds @simd for i in eachindex(y) + # first order + δ𝑀[1, i] = -tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) * exp(-p[2, i]) * δ𝑀[5, i] + δ𝑀[2, i] = -(exp(-p[2, i]) * (y[i] - p[1, i]) * tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) - 1) * δ𝑀[5, i] + # second order + δ𝑀[3, i] = max(Ο΅, sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) * δ𝑀[5, i]) + δ𝑀[4, i] = max(Ο΅, (exp(-2 * p[2, i]) * (p[1, i] - y[i]) * (p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i])))) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * δ𝑀[5, i]) end end @@ -121,7 +138,7 @@ function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:GradientRegress end # GaussianRegression -function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:GaussianRegression,T<:AbstractFloat} +function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:MLE2P,T<:AbstractFloat} (βˆ‘[1]^2 / (βˆ‘[3] + Ξ» * βˆ‘[5]) + βˆ‘[2]^2 / (βˆ‘[4] + Ξ» * βˆ‘[5])) / 2 end @@ -151,7 +168,7 @@ function update_childs_βˆ‘!(::Type{L}, nodes, n, bin, feat, K) where {L<:Union{G return nothing end -function update_childs_βˆ‘!(::Type{L}, nodes, n, bin, feat, K) where {L<:GaussianRegression} +function update_childs_βˆ‘!(::Type{L}, nodes, n, bin, feat, K) where {L<:MLE2P} nodes[n<<1].βˆ‘ .= nodes[n].hL[feat][(5*bin-4):(5*bin)] nodes[n<<1+1].βˆ‘ .= nodes[n].hR[feat][(5*bin-4):(5*bin)] return nothing diff --git a/src/models.jl b/src/models.jl index acb244cc..0682f062 100644 --- a/src/models.jl +++ b/src/models.jl @@ -3,7 +3,7 @@ abstract type GradientRegression <: ModelType end abstract type L1Regression <: ModelType end abstract type QuantileRegression <: ModelType end abstract type MultiClassRegression <: ModelType end -abstract type GaussianRegression <: ModelType end +abstract type MLE2P <: ModelType end # 2-parameters max-likelihood struct Linear <: GradientRegression end struct Logistic <: GradientRegression end struct Poisson <: GradientRegression end @@ -12,7 +12,8 @@ struct Tweedie <: GradientRegression end struct L1 <: L1Regression end struct Quantile <: QuantileRegression end struct Softmax <: MultiClassRegression end -struct Gaussian <: GaussianRegression end +struct GaussianDist <: MLE2P end +struct LogisticDist <: MLE2P end # make a Random Number Generator object mk_rng(rng::Random.AbstractRNG) = rng @@ -246,7 +247,7 @@ function EvoTreeClassifier(; kwargs...) return model end -mutable struct EvoTreeGaussian{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Probabilistic +mutable struct EvoTreeMLE{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Probabilistic nrounds::S lambda::T gamma::T @@ -262,6 +263,86 @@ mutable struct EvoTreeGaussian{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Prob device end +function EvoTreeMLE(; kwargs...) + + # defaults arguments + args = Dict{Symbol,Any}( + :T => Float64, + :loss => :gaussian, + :nrounds => 10, + :lambda => 0.0, + :gamma => 0.0, # min gain to split + :eta => 0.1, # learning rate + :max_depth => 5, + :min_weight => 1.0, # minimal weight, different from xgboost (but same for linear) + :rowsample => 1.0, + :colsample => 1.0, + :nbins => 32, + :alpha => 0.5, + :monotone_constraints => Dict{Int,Int}(), + :rng => 123, + :device => "cpu" + ) + + args_ignored = setdiff(keys(kwargs), keys(args)) + args_ignored_str = join(args_ignored, ", ") + length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." 
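    # --- editor's note (illustrative sketch, not part of the patch) --------
    # A hypothetical call showing the keyword validation above at work:
    #
    #   m = EvoTreeMLE(loss=:logistic, nrounds=100, foo=1)
    #   # logs that `foo` is ignored, logs which arguments fall back to their
    #   # defaults, and returns an EvoTreeMLE{LogisticDist, Float64, Int} model
    #
    # `foo` is a made-up keyword used only to trigger the ignored-arguments path.
    # -------------------------------------------------------------------------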
+ + args_default = setdiff(keys(args), keys(kwargs)) + args_default_str = join(args_default, ", ") + length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + + args_override = intersect(keys(args), keys(kwargs)) + for arg in args_override + args[arg] = kwargs[arg] + end + + args[:rng] = mk_rng(args[:rng])::Random.AbstractRNG + args[:loss] = Symbol(args[:loss]) + T = args[:T] + + if args[:loss] in [:gaussian, :normal] + L = GaussianDist + elseif args[:loss] == :logistic + L = LogisticDist + else + error("Invalid loss: $(args[:loss]). Only `:normal`, `:gaussian` and `:logistic` are supported at the moment by EvoTreeMLE.") + end + + model = EvoTreeMLE{L,T,Int}( + args[:nrounds], + T(args[:lambda]), + T(args[:gamma]), + T(args[:eta]), + args[:max_depth], + T(args[:min_weight]), + T(args[:rowsample]), + T(args[:colsample]), + args[:nbins], + T(args[:alpha]), + args[:monotone_constraints], + args[:rng], + args[:device]) + + return model +end + + +mutable struct EvoTreeGaussian{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Probabilistic + nrounds::S + lambda::T + gamma::T + eta::T + max_depth::S + min_weight::T # real minimum number of observations, different from xgboost (but same for linear) + rowsample::T # subsample + colsample::T + nbins::S + alpha::T + monotone_constraints + rng + device +end function EvoTreeGaussian(; kwargs...) # defaults arguments @@ -296,7 +377,7 @@ function EvoTreeGaussian(; kwargs...) end args[:rng] = mk_rng(args[:rng])::Random.AbstractRNG - L = Gaussian + L = GaussianDist T = args[:T] model = EvoTreeGaussian{L,T,Int}( @@ -318,4 +399,4 @@ function EvoTreeGaussian(; kwargs...) end # const EvoTypes = Union{EvoTreeRegressor,EvoTreeCount,EvoTreeClassifier,EvoTreeGaussian} -const EvoTypes{L,T,S} = Union{EvoTreeRegressor{L,T,S},EvoTreeCount{L,T,S},EvoTreeClassifier{L,T,S},EvoTreeGaussian{L,T,S}} +const EvoTypes{L,T,S} = Union{EvoTreeRegressor{L,T,S},EvoTreeCount{L,T,S},EvoTreeClassifier{L,T,S},EvoTreeGaussian{L,T,S},EvoTreeMLE{L,T,S}} \ No newline at end of file diff --git a/src/predict.jl b/src/predict.jl index 8160a429..415f1d48 100644 --- a/src/predict.jl +++ b/src/predict.jl @@ -20,7 +20,7 @@ function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:Logistic,T} return nothing end -function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:GaussianRegression,T} +function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:MLE2P,T} @inbounds @threads for i in axes(X, 1) nid = 1 @inbounds while tree.split[nid] @@ -75,7 +75,7 @@ function predict(model::GBTree{L,T,S}, X::AbstractMatrix) where {L,T,S} pred .= sigmoid.(pred) elseif L ∈ [Poisson, Gamma, Tweedie] pred .= exp.(pred) - elseif L == Gaussian + elseif L in [GaussianDist, LogisticDist] pred[2, :] .= exp.(pred[2, :]) elseif L == Softmax @inbounds for i in axes(pred, 2) @@ -94,12 +94,12 @@ function pred_scalar_cpu!(βˆ‘::Vector{T}, params::EvoTypes, K) where {L<:Gradien -params.eta * βˆ‘[1] / (βˆ‘[2] + params.lambda * βˆ‘[3]) end -# prediction in Leaf - GaussianRegression -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:GaussianRegression,T,S} +# prediction in Leaf - MLE2P +function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:MLE2P,T,S} pred[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]) pred[2, n] = -params.eta * βˆ‘[2] / (βˆ‘[4] + params.lambda * βˆ‘[5]) end -function pred_scalar_cpu!(βˆ‘::Vector{T}, 
params::EvoTypes{L,T,S}, K) where {L<:GaussianRegression,T,S} +function pred_scalar_cpu!(βˆ‘::Vector{T}, params::EvoTypes{L,T,S}, K) where {L<:MLE2P,T,S} -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]) end diff --git a/test/MLJ.jl b/test/MLJ.jl index a19dc22b..6383c5dc 100644 --- a/test/MLJ.jl +++ b/test/MLJ.jl @@ -146,7 +146,6 @@ Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval] # @load EvoTreeRegressor tree_model = EvoTreeGaussian( - loss=:gaussian, metric=:gaussian, nrounds=10, lambda=0.0, gamma=0.0, eta=0.1, max_depth=6, min_weight=1.0, @@ -173,6 +172,52 @@ q_20 = quantile.(pred, 0.80) report(mach) +################################################## +### Logistic - Larger data +################################################## +features = rand(10_000, 10) +X = features +Y = rand(size(X, 1)) +𝑖 = collect(1:size(X, 1)) + +# train-eval split +𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) +train_size = 0.8 +𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] +𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] + +x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :] +y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] + +# @load EvoTreeRegressor +tree_model = EvoTreeMLE( + loss=:logistic, + nrounds=10, + lambda=1.0, gamma=0.0, eta=0.1, + max_depth=6, min_weight=1.0, + rowsample=0.5, colsample=0.5, nbins=32) + +X = MLJBase.table(X) + +# typeof(X) +mach = machine(tree_model, X, Y) +train, test = partition(eachindex(Y), 0.8, shuffle=true); # 70:30 split +fit!(mach, rows=train, verbosity=1, force=true) + +mach.model.nrounds += 10 +fit!(mach, rows=train, verbosity=1) + +pred = predict(mach, selectrows(X, train)) +pred_mean = predict_mean(mach, selectrows(X, train)) +pred_mode = predict_mode(mach, selectrows(X, train)) +# pred_mode = predict_median(mach, selectrows(X,train)) +mean(abs.(pred_mean - selectrows(Y, train))) + +q_20 = quantile.(pred, 0.20) +q_20 = quantile.(pred, 0.80) + +report(mach) + ############################ # Added in response to #92 # ############################ @@ -198,6 +243,7 @@ for model ∈ [ EvoTreeClassifier(), EvoTreeCount(), EvoTreeRegressor(), + EvoTreeMLE(), EvoTreeGaussian(), ] diff --git a/test/core.jl b/test/core.jl index 32e9c522..68dc7ed7 100644 --- a/test/core.jl +++ b/test/core.jl @@ -24,7 +24,7 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] @testset "EvoTreeRegressor - Linear" begin # linear params1 = EvoTreeRegressor( - loss=:linear, metric=:mse, + loss=:linear, nrounds=100, nbins=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -33,7 +33,7 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -43,7 +43,7 @@ end @testset "EvoTreeRegressor - Logistic" begin params1 = EvoTreeRegressor( - loss=:logistic, metric=:logloss, + loss=:logistic, nrounds=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -52,7 +52,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, 
metric=:logloss, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -62,7 +62,7 @@ end @testset "EvoTreeRegressor - Gamma" begin params1 = EvoTreeRegressor( - loss=:gamma, metric=:gamma, + loss=:gamma, nrounds=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -71,7 +71,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gamma, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -81,7 +81,7 @@ end @testset "EvoTreeRegressor - Tweedie" begin params1 = EvoTreeRegressor( - loss=:tweedie, metric=:tweedie, + loss=:tweedie, nrounds=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -90,7 +90,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:tweedie, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -100,7 +100,7 @@ end @testset "EvoTreeRegressor - L1" begin params1 = EvoTreeRegressor( - loss=:L1, alpha=0.5, metric=:mae, + loss=:L1, alpha=0.5, nrounds=100, nbins=100, lambda=0.5, gamma=0.0, eta=0.05, max_depth=6, min_weight=1.0, @@ -109,7 +109,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mae, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -119,7 +119,7 @@ end @testset "EvoTreeRegressor - Quantile" begin params1 = EvoTreeRegressor( - loss=:quantile, alpha=0.5, metric=:quantile, + loss=:quantile, alpha=0.5, nrounds=100, nbins=100, lambda=0.5, gamma=0.0, eta=0.05, max_depth=6, min_weight=1.0, @@ -128,7 +128,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:quantile, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -138,7 +138,7 @@ end @testset "EvoTreeCount - Count" begin params1 = EvoTreeCount( - loss=:poisson, metric=:poisson, + loss=:poisson, nrounds=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -147,7 +147,7 @@ end model, cache = EvoTrees.init_evotree(params1, x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:poisson, print_every_n=25) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -155,9 +155,46 @@ end @test 
mse_gain_pct < -0.75 end +@testset "EvoTreeMLE - Gaussian" begin + params1 = EvoTreeMLE( + loss=:gaussian, + nrounds=100, nbins=100, + lambda=0.0, gamma=0.0, eta=0.05, + max_depth=6, min_weight=10.0, + rowsample=0.5, colsample=1.0, rng=123) + + model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + preds_ini = EvoTrees.predict(model, x_eval)[:, 1] + mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gaussian, print_every_n=25) + + preds = EvoTrees.predict(model, x_eval)[:, 1] + mse_error = mean(abs.(preds .- y_eval) .^ 2) + mse_gain_pct = mse_error / mse_error_ini - 1 + @test mse_gain_pct < -0.75 +end + +@testset "EvoTreeMLE - Logistic" begin + params1 = EvoTreeMLE( + loss=:logistic, + nrounds=100, nbins=100, + lambda=0.0, gamma=0.0, eta=0.05, + max_depth=6, min_weight=10.0, + rowsample=0.5, colsample=1.0, rng=123) + + model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + preds_ini = EvoTrees.predict(model, x_eval)[:, 1] + mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:logistic, print_every_n=25) + + preds = EvoTrees.predict(model, x_eval)[:, 1] + mse_error = mean(abs.(preds .- y_eval) .^ 2) + mse_gain_pct = mse_error / mse_error_ini - 1 + @test mse_gain_pct < -0.75 +end + @testset "EvoTreeGaussian - Gaussian" begin params1 = EvoTreeGaussian( - loss=:gaussian, metric=:gaussian, nrounds=100, nbins=100, lambda=0.0, gamma=0.0, eta=0.05, max_depth=6, min_weight=10.0, @@ -176,7 +213,7 @@ end @testset "EvoTrees - Feature Importance" begin params1 = EvoTreeRegressor( - loss=:linear, metric=:mae, + loss=:linear, nrounds=100, nbins=100, lambda=0.5, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, From ed4f8abdeff437d4d8e865de72831497c096310b Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Sun, 16 Oct 2022 23:12:06 -0400 Subject: [PATCH 04/11] up --- experiments/benchmarks_v2-MLE.jl | 8 ++++---- test/MLJ.jl | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/experiments/benchmarks_v2-MLE.jl b/experiments/benchmarks_v2-MLE.jl index 1b49ae50..0aa30c75 100644 --- a/experiments/benchmarks_v2-MLE.jl +++ b/experiments/benchmarks_v2-MLE.jl @@ -13,14 +13,14 @@ nthread = Base.Threads.nthreads() # EvoTrees params params_evo = EvoTreeMLE( - T=Float32, + T=Float64, loss=:gaussian, nrounds=nrounds, lambda=0.0, gamma=0.0, eta=0.05, max_depth=6, - min_weight=1.0, + min_weight=100.0, rowsample=0.5, colsample=0.5, nbins=64, @@ -53,14 +53,14 @@ params_evo.device = "gpu" # Logistic ################################ params_evo = EvoTreeMLE( - T=Float32, + T=Float64, loss=:logistic, nrounds=nrounds, lambda=0.0, gamma=0.0, eta=0.05, max_depth=6, - min_weight=1.0, + min_weight=100.0, rowsample=0.5, colsample=0.5, nbins=64, diff --git a/test/MLJ.jl b/test/MLJ.jl index 6383c5dc..5692065a 100644 --- a/test/MLJ.jl +++ b/test/MLJ.jl @@ -194,7 +194,7 @@ tree_model = EvoTreeMLE( loss=:logistic, nrounds=10, lambda=1.0, gamma=0.0, eta=0.1, - max_depth=6, min_weight=1.0, + max_depth=6, min_weight=32.0, rowsample=0.5, colsample=0.5, nbins=32) X = MLJBase.table(X) From f6aea783f09bcd02a35836422da260a52991ce03 Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Sun, 16 Oct 2022 23:20:22 -0400 Subject: [PATCH 05/11] up --- src/loss.jl | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/loss.jl b/src/loss.jl index 74da719b..2dbde1b8 100644 --- a/src/loss.jl +++ b/src/loss.jl @@ -80,14 
+80,13 @@ end # pred[i][1] = ΞΌ # pred[i][2] = log(Οƒ) function update_grads!(::Type{GaussianDist}, δ𝑀::Matrix, p::Matrix, y::Vector; kwargs...) - Ο΅ = eltype(p)(2e-7) @inbounds @simd for i in eachindex(y) # first order δ𝑀[1, i] = (p[1, i] - y[i]) / exp(2 * p[2, i]) * δ𝑀[5, i] δ𝑀[2, i] = (1 - (p[1, i] - y[i])^2 / exp(2 * p[2, i])) * δ𝑀[5, i] # second order - δ𝑀[3, i] = max(Ο΅, δ𝑀[5, i] / exp(2 * p[2, i])) - δ𝑀[4, i] = max(Ο΅, δ𝑀[5, i] * 2 / exp(2 * p[2, i]) * (p[1, i] - y[i])^2) + δ𝑀[3, i] = δ𝑀[5, i] / exp(2 * p[2, i]) + δ𝑀[4, i] = δ𝑀[5, i] * 2 / exp(2 * p[2, i]) * (p[1, i] - y[i])^2 end end @@ -102,8 +101,8 @@ function update_grads!(::Type{LogisticDist}, δ𝑀::Matrix, p::Matrix, y::Vecto δ𝑀[1, i] = -tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) * exp(-p[2, i]) * δ𝑀[5, i] δ𝑀[2, i] = -(exp(-p[2, i]) * (y[i] - p[1, i]) * tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) - 1) * δ𝑀[5, i] # second order - δ𝑀[3, i] = max(Ο΅, sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) * δ𝑀[5, i]) - δ𝑀[4, i] = max(Ο΅, (exp(-2 * p[2, i]) * (p[1, i] - y[i]) * (p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i])))) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * δ𝑀[5, i]) + δ𝑀[3, i] = sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) * δ𝑀[5, i] + δ𝑀[4, i] = (exp(-2 * p[2, i]) * (p[1, i] - y[i]) * (p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i])))) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * δ𝑀[5, i] end end From caa644acad8d3e6fabf173ec56addd0e4e23da8b Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Sun, 16 Oct 2022 23:21:16 -0400 Subject: [PATCH 06/11] up --- experiments/benchmarks_v2-MLE.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/experiments/benchmarks_v2-MLE.jl b/experiments/benchmarks_v2-MLE.jl index 0aa30c75..647b8300 100644 --- a/experiments/benchmarks_v2-MLE.jl +++ b/experiments/benchmarks_v2-MLE.jl @@ -32,7 +32,7 @@ y_train = rand(size(x_train, 1)) @info "evotrees train CPU:" params_evo.device = "cpu" -@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=50); +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=100); @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian); @info "evotrees predict CPU:" @time pred_evo = EvoTrees.predict(m_evo, x_train); @@ -42,7 +42,7 @@ CUDA.allowscalar(true) @info "evotrees train GPU:" params_evo.device = "gpu" @time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); -@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=50); +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:gaussian, print_every_n=100); @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=:gaussian); @info "evotrees predict GPU:" @time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); @@ -72,7 +72,7 @@ y_train = rand(size(x_train, 1)) @info "evotrees train CPU:" params_evo.device = "cpu" -@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:logistic, print_every_n=50); +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=:logistic, print_every_n=100); @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, 
x_eval=$x_train, y_eval=$y_train, metric=:logistic); @info "evotrees predict CPU:" @time pred_evo = EvoTrees.predict(m_evo, x_train); From e32438f6155ab0df643955f0b12cb765480858ac Mon Sep 17 00:00:00 2001 From: jeremie Date: Mon, 17 Oct 2022 19:25:58 -0400 Subject: [PATCH 07/11] test serialization --- test/save_load.jl | 61 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 test/save_load.jl diff --git a/test/save_load.jl b/test/save_load.jl new file mode 100644 index 00000000..7b61af18 --- /dev/null +++ b/test/save_load.jl @@ -0,0 +1,61 @@ +using Statistics +using StatsBase: sample, quantile +using Distributions +using Random +using EvoTrees +using EvoTrees: sigmoid, logit +using Serialization + +# prepare a dataset +Random.seed!(12) +features = rand(10_000) .* 5 +X = reshape(features, (size(features)[1], 1)) +Y = sin.(features) .* 0.5 .+ 0.5 +Y = logit(Y) + randn(size(Y)) +Y = sigmoid(Y) +𝑖 = collect(1:size(X, 1)) + +# train-eval split +𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) +train_size = 0.8 +𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] +𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] + +x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :] +y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] + +# linear +params1 = EvoTreeRegressor(T=Float64, + loss=:linear, metric=:mse, + nrounds=200, nbins=64, + lambda=0.1, gamma=0.1, eta=0.05, + max_depth=6, min_weight=1.0, + rowsample=0.5, colsample=1.0, + rng=123) + +m = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +p = m(x_eval) + +# serialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat"), m); +# serialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat"), p); + +m_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v172.dat")); +p_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v172.dat")); +pm_172 = m_172(x_eval) + +m_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v180.dat")); +p_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v180.dat")); +pm_180 = m_180(x_eval) + +m_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat")); +p_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat")); +pm_182 = m_182(x_eval) + +@assert all(p .== p_172) +@assert all(p .== pm_172) +@assert all(p .== p_180) +@assert all(p .== pm_180) +@assert all(p .== p_182) +@assert all(p .== pm_182) + +@info "test successful! 
πŸš€" \ No newline at end of file From 8fb2cbcbc82e7710ba6687a135cb5ebf4e317ae6 Mon Sep 17 00:00:00 2001 From: jeremie Date: Mon, 17 Oct 2022 21:54:23 -0400 Subject: [PATCH 08/11] up --- experiments/readme_plots_cpu.jl | 24 +-- experiments/readme_plots_gpu.jl | 3 +- src/MLJ.jl | 213 ++++++++++++--------- src/find_split.jl | 199 ++++++++++++++++---- src/fit.jl | 106 +++++------ src/gpu/fit_gpu.jl | 144 ++++++++++----- src/importance.jl | 2 +- src/loss.jl | 59 ++++-- src/models.jl | 99 ++++++---- src/plot.jl | 45 +++-- src/predict.jl | 68 ++++++- src/structs.jl | 12 +- test/MLJ.jl | 90 +++++---- test/core.jl | 317 +++++++++++++++++++++++--------- test/gpu_base.jl | 122 ++++++++---- test/monotonic.jl | 150 +++++++++------ test/plot.jl | 8 +- test/save_load.jl | 60 +++--- 18 files changed, 1164 insertions(+), 557 deletions(-) diff --git a/experiments/readme_plots_cpu.jl b/experiments/readme_plots_cpu.jl index f80ae569..b660a375 100644 --- a/experiments/readme_plots_cpu.jl +++ b/experiments/readme_plots_cpu.jl @@ -51,7 +51,7 @@ sqrt(mean((pred_train_linear .- y_train) .^ 2)) # linear weighted params1 = EvoTreeRegressor(T=Float64, - loss=:linear, metric=:mse, + loss=:linear, nrounds=200, nbins=64, lambda=0.1, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, @@ -61,7 +61,7 @@ params1 = EvoTreeRegressor(T=Float64, # W_train = ones(eltype(Y_train), size(Y_train)) .* 5 w_train = rand(eltype(y_train), size(y_train)) .+ 0 -@time model = fit_evotree(params1; x_train, y_train, w_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, w_train, x_eval, y_eval, print_every_n=25, metric=:mse); # 67.159 ms (77252 allocations: 28.06 MiB) # @time model = fit_evotree(params1, X_train, Y_train, X_eval = X_eval, Y_eval = Y_eval, print_every_n = 999); # @btime model = fit_evotree($params1, $X_train, $Y_train, X_eval = $X_eval, Y_eval = $Y_eval); @@ -77,13 +77,13 @@ sqrt(mean((pred_train_linear_w .- y_train) .^ 2)) # logistic / cross-entropy params1 = EvoTreeRegressor( - loss=:logistic, metric=:logloss, + loss=:logistic, nrounds=200, nbins=64, lambda=0.1, gamma=0.1, eta=0.05, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=1.0) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:logloss); # 218.040 ms (123372 allocations: 34.71 MiB) # @btime model = fit_evotree($params1, $X_train, $Y_train, X_eval = $X_eval, Y_eval = $Y_eval) @time pred_train_logistic = predict(model, x_train); @@ -97,7 +97,7 @@ params1 = EvoTreeRegressor( lambda=0.0, gamma=0.0, eta=0.05, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=1.0) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:mae); @time pred_train_L1 = predict(model, x_train) @time pred_eval_L1 = predict(model, x_eval) sqrt(mean((pred_train_L1 .- y_train) .^ 2)) @@ -112,34 +112,34 @@ savefig("figures/regression_sinus.png") # Poisson params1 = EvoTreeCount( - loss=:poisson, metric=:poisson, + loss=:poisson, nrounds=200, nbins=64, lambda=0.5, gamma=0.1, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=1.0) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:poisson); @time pred_train_poisson = predict(model, x_train); 
sqrt(mean((pred_train_poisson .- y_train) .^ 2)) # Gamma params1 = EvoTreeRegressor( - loss=:gamma, metric=:gamma, + loss=:gamma, nrounds=200, nbins=64, lambda=0.5, gamma=0.1, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=1.0) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:gamma); @time pred_train_gamma = predict(model, x_train); sqrt(mean((pred_train_gamma .- y_train) .^ 2)) # Tweedie params1 = EvoTreeRegressor( - loss=:tweedie, metric=:tweedie, + loss=:tweedie, nrounds=200, nbins=64, lambda=0.5, gamma=0.1, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=1.0) -@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:tweedie); @time pred_train_tweedie = predict(model, x_train); sqrt(mean((pred_train_tweedie .- y_train) .^ 2)) @@ -180,7 +180,7 @@ sum(pred_train_q20 .< y_train) / length(y_train) # q80 params1 = EvoTreeRegressor( - loss=:quantile, alpha=0.8, metric=:none, + loss=:quantile, alpha=0.8, nrounds=200, nbins=64, lambda=1.0, gamma=0.0, eta=0.05, max_depth=6, min_weight=1.0, diff --git a/experiments/readme_plots_gpu.jl b/experiments/readme_plots_gpu.jl index cbe94b19..e4298c3c 100644 --- a/experiments/readme_plots_gpu.jl +++ b/experiments/readme_plots_gpu.jl @@ -37,6 +37,7 @@ params1 = EvoTreeRegressor(T=Float32, device="gpu") @time model = fit_evotree(params1; x_train, y_train); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25); model_cpu = convert(EvoTrees.GBTree, model); pred_train_linear_gpu = predict(model, x_train) pred_train_linear_cpu = predict(model_cpu, x_train) @@ -122,7 +123,7 @@ params1 = EvoTreeGaussian(T=Float32, device="gpu") @time model = fit_evotree(params1; x_train, y_train); -# @time model = fit_evotree(params1, X_train, Y_train, X_eval=X_eval, Y_eval=Y_eval, print_every_n=25); +@time model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25, metric=:gaussian); # @time model = fit_evotree(params1, X_train, Y_train, print_every_n = 10); @time pred_train_gaussian = EvoTrees.predict(model, x_train) diff --git a/src/MLJ.jl b/src/MLJ.jl index 3ecc1bb3..72875d3d 100644 --- a/src/MLJ.jl +++ b/src/MLJ.jl @@ -1,87 +1,95 @@ function MMI.fit(model::EvoTypes, verbosity::Int, A, y) - if model.device == "gpu" - fitresult, cache = init_evotree_gpu(model, A.matrix, y) - else - fitresult, cache = init_evotree(model, A.matrix, y) - end - grow_evotree!(fitresult, cache) - report = (features=A.names,) - return fitresult, cache, report + if model.device == "gpu" + fitresult, cache = init_evotree_gpu(model; x_train = A.matrix, y_train = y) + else + fitresult, cache = init_evotree(model; x_train = A.matrix, y_train = y) + end + grow_evotree!(fitresult, cache) + report = (features = A.names,) + return fitresult, cache, report end function okay_to_continue(new, old) - new.nrounds - old.nrounds >= 0 && - new.lambda == old.lambda && - new.gamma == old.gamma && - new.max_depth == old.max_depth && - new.min_weight == old.min_weight && - new.rowsample == old.rowsample && - new.colsample == old.colsample && - new.nbins == old.nbins && - new.alpha == old.alpha && - new.device == old.device + new.nrounds - old.nrounds >= 0 && + new.lambda == old.lambda && + new.gamma == old.gamma && + new.max_depth == old.max_depth && + 
new.min_weight == old.min_weight && + new.rowsample == old.rowsample && + new.colsample == old.colsample && + new.nbins == old.nbins && + new.alpha == old.alpha && + new.device == old.device end # Generate names to be used by feature_importances in the report -MMI.reformat(::EvoTypes, X, y) = ((matrix=MMI.matrix(X), names=[name for name ∈ schema(X).names]), y) -MMI.reformat(::EvoTypes, X) = ((matrix=MMI.matrix(X), names=[name for name ∈ schema(X).names]),) -MMI.reformat(::EvoTypes, X::AbstractMatrix, y) = ((matrix=X, names=["feat_$i" for i = 1:size(X, 2)]), y) -MMI.reformat(::EvoTypes, X::AbstractMatrix) = ((matrix=X, names=["feat_$i" for i = 1:size(X, 2)]),) -MMI.selectrows(::EvoTypes, I, A, y) = ((matrix=view(A.matrix, I, :), names=A.names), view(y, I)) -MMI.selectrows(::EvoTypes, I, A) = ((matrix=view(A.matrix, I, :), names=A.names),) +MMI.reformat(::EvoTypes, X, y) = + ((matrix = MMI.matrix(X), names = [name for name ∈ schema(X).names]), y) +MMI.reformat(::EvoTypes, X) = + ((matrix = MMI.matrix(X), names = [name for name ∈ schema(X).names]),) +MMI.reformat(::EvoTypes, X::AbstractMatrix, y) = + ((matrix = X, names = ["feat_$i" for i = 1:size(X, 2)]), y) +MMI.reformat(::EvoTypes, X::AbstractMatrix) = + ((matrix = X, names = ["feat_$i" for i = 1:size(X, 2)]),) +MMI.selectrows(::EvoTypes, I, A, y) = + ((matrix = view(A.matrix, I, :), names = A.names), view(y, I)) +MMI.selectrows(::EvoTypes, I, A) = ((matrix = view(A.matrix, I, :), names = A.names),) # For EarlyStopping.jl support MMI.iteration_parameter(::Type{<:EvoTypes}) = :nrounds function MMI.update(model::EvoTypes, verbosity::Integer, fitresult, cache, A, y) - if okay_to_continue(model, cache.params) - grow_evotree!(fitresult, cache) - else - fitresult, cache = init_evotree(model, A.matrix, y) - grow_evotree!(fitresult, cache) - end - report = (features=A.names,) - return fitresult, cache, report + if okay_to_continue(model, cache.params) + grow_evotree!(fitresult, cache) + else + if model.device == "gpu" + fitresult, cache = init_evotree_gpu(model; x_train = A.matrix, y_train = y) + else + fitresult, cache = init_evotree(model; x_train = A.matrix, y_train = y) + end + grow_evotree!(fitresult, cache) + end + report = (features = A.names,) + return fitresult, cache, report end function predict(::EvoTreeRegressor, fitresult, A) - pred = vec(predict(fitresult, A.matrix)) - return pred + pred = vec(predict(fitresult, A.matrix)) + return pred end function predict(::EvoTreeClassifier, fitresult, A) - pred = predict(fitresult, A.matrix) - return MMI.UnivariateFinite(fitresult.info[:levels], pred, pool=missing) + pred = predict(fitresult, A.matrix) + return MMI.UnivariateFinite(fitresult.info[:levels], pred, pool = missing) end function predict(::EvoTreeCount, fitresult, A) - Ξ»s = vec(predict(fitresult, A.matrix)) - return [Distributions.Poisson(Ξ») for Ξ» ∈ Ξ»s] + Ξ»s = vec(predict(fitresult, A.matrix)) + return [Distributions.Poisson(Ξ») for Ξ» ∈ Ξ»s] end function predict(::EvoTreeGaussian, fitresult, A) - pred = predict(fitresult, A.matrix) - return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] + pred = predict(fitresult, A.matrix) + return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] end function predict(::EvoTreeMLE{L,T,S}, fitresult, A) where {L<:GaussianDist,T,S} - pred = predict(fitresult, A.matrix) - return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] + pred = predict(fitresult, A.matrix) + return [Distributions.Normal(pred[i, 1], pred[i, 2]) for i in 
axes(pred, 1)] end function predict(::EvoTreeMLE{L,T,S}, fitresult, A) where {L<:LogisticDist,T,S} - pred = predict(fitresult, A.matrix) - @info pred - return [Distributions.Logistic(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] + pred = predict(fitresult, A.matrix) + return [Distributions.Logistic(pred[i, 1], pred[i, 2]) for i in axes(pred, 1)] end # Feature Importances MMI.reports_feature_importances(::Type{<:EvoTypes}) = true function MMI.feature_importances(m::EvoTypes, fitresult, report) - fi_pairs = importance(fitresult) - return fi_pairs + fi_pairs = importance(fitresult) + return fi_pairs end @@ -92,48 +100,75 @@ const EvoTreeCount_desc = "Poisson regression fitting Ξ» with deviance minimizat const EvoTreeGaussian_desc = "Deprecated - Use EvoTreeMLE with `loss=:normal` instead. Gaussian maximum likelihood of ΞΌ and Οƒ." const EvoTreeMLE_desc = "Maximum likelihood methods supporting Normal/Gaussian and Logistic distributions." -MMI.metadata_pkg.((EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount, EvoTreeGaussian), - name="EvoTrees", - uuid="f6006082-12f8-11e9-0c9c-0d5d367ab1e5", - url="https://github.com/Evovest/EvoTrees.jl", - julia=true, - license="Apache", - is_wrapper=false) - -MMI.metadata_model(EvoTreeRegressor, - input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, - target_scitype=AbstractVector{<:MMI.Continuous}, - weights=false, - path="EvoTrees.EvoTreeRegressor", - descr=EvoTreeRegressor_desc) - -MMI.metadata_model(EvoTreeClassifier, - input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, - target_scitype=AbstractVector{<:MMI.Finite}, - weights=false, - path="EvoTrees.EvoTreeClassifier", - descr=EvoTreeClassifier_desc) - -MMI.metadata_model(EvoTreeCount, - input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, - target_scitype=AbstractVector{<:MMI.Count}, - weights=false, - path="EvoTrees.EvoTreeCount", - descr=EvoTreeCount_desc) - -MMI.metadata_model(EvoTreeGaussian, - input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, - target_scitype=AbstractVector{<:MMI.Continuous}, - weights=false, - path="EvoTrees.EvoTreeGaussian", - descr=EvoTreeGaussian_desc) - -MMI.metadata_model(EvoTreeMLE, - input_scitype=Union{MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor),AbstractMatrix{MMI.Continuous}}, - target_scitype=AbstractVector{<:MMI.Continuous}, - weights=false, - path="EvoTrees.EvoTreeMLE", - descr=EvoTreeMLE_desc) +MMI.metadata_pkg.( + (EvoTreeRegressor, EvoTreeClassifier, EvoTreeCount, EvoTreeGaussian), + name = "EvoTrees", + uuid = "f6006082-12f8-11e9-0c9c-0d5d367ab1e5", + url = "https://github.com/Evovest/EvoTrees.jl", + julia = true, + license = "Apache", + is_wrapper = false, +) + +MMI.metadata_model( + EvoTreeRegressor, + input_scitype = Union{ + MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor), + AbstractMatrix{MMI.Continuous}, + }, + target_scitype = AbstractVector{<:MMI.Continuous}, + weights = false, + path = "EvoTrees.EvoTreeRegressor", + descr = EvoTreeRegressor_desc, +) + +MMI.metadata_model( + EvoTreeClassifier, + input_scitype = Union{ + MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor), + AbstractMatrix{MMI.Continuous}, + }, + target_scitype = AbstractVector{<:MMI.Finite}, + weights = false, + path = "EvoTrees.EvoTreeClassifier", + descr = EvoTreeClassifier_desc, +) + +MMI.metadata_model( + EvoTreeCount, + input_scitype = 
Union{ + MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor), + AbstractMatrix{MMI.Continuous}, + }, + target_scitype = AbstractVector{<:MMI.Count}, + weights = false, + path = "EvoTrees.EvoTreeCount", + descr = EvoTreeCount_desc, +) + +MMI.metadata_model( + EvoTreeGaussian, + input_scitype = Union{ + MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor), + AbstractMatrix{MMI.Continuous}, + }, + target_scitype = AbstractVector{<:MMI.Continuous}, + weights = false, + path = "EvoTrees.EvoTreeGaussian", + descr = EvoTreeGaussian_desc, +) + +MMI.metadata_model( + EvoTreeMLE, + input_scitype = Union{ + MMI.Table(MMI.Continuous, MMI.Count, MMI.OrderedFactor), + AbstractMatrix{MMI.Continuous}, + }, + target_scitype = AbstractVector{<:MMI.Continuous}, + weights = false, + path = "EvoTrees.EvoTreeMLE", + descr = EvoTreeMLE_desc, +) """ EvoTreeRegressor(;kwargs...) diff --git a/src/find_split.jl b/src/find_split.jl index cc82df7d..907d5290 100644 --- a/src/find_split.jl +++ b/src/find_split.jl @@ -30,7 +30,20 @@ end Multi-threaded split_set! Take a view into left and right placeholders. Right ids are assigned at the end of the length of the current node set. """ -function split_set_chunk!(left, right, 𝑖, bid, nblocks, X_bin, feat, cond_bin, offset, chunk_size, lefts, rights) +function split_set_chunk!( + left, + right, + 𝑖, + bid, + nblocks, + X_bin, + feat, + cond_bin, + offset, + chunk_size, + lefts, + rights, +) left_count = 0 right_count = 0 @@ -53,7 +66,19 @@ function split_set_chunk!(left, right, 𝑖, bid, nblocks, X_bin, feat, cond_bin return nothing end -function split_views_kernel!(out::Vector{S}, left::Vector{S}, right::Vector{S}, bid, offset, chunk_size, lefts, rights, sum_lefts, cumsum_lefts, cumsum_rights) where {S} +function split_views_kernel!( + out::Vector{S}, + left::Vector{S}, + right::Vector{S}, + bid, + offset, + chunk_size, + lefts, + rights, + sum_lefts, + cumsum_lefts, + cumsum_rights, +) where {S} iter = 1 i_max = lefts[bid] bid == 1 ? 
cumsum_left = 0 : cumsum_left = cumsum_lefts[bid-1] @@ -72,7 +97,16 @@ function split_views_kernel!(out::Vector{S}, left::Vector{S}, right::Vector{S}, return nothing end -function split_set_threads!(out, left, right, 𝑖, X_bin::Matrix{S}, feat, cond_bin, offset) where {S} +function split_set_threads!( + out, + left, + right, + 𝑖, + X_bin::Matrix{S}, + feat, + cond_bin, + offset, +) where {S} # iter = Iterators.partition(𝑖, chunk_size) nblocks = ceil(Int, min(length(𝑖) / 1024, Threads.nthreads())) @@ -82,17 +116,45 @@ function split_set_threads!(out, left, right, 𝑖, X_bin::Matrix{S}, feat, cond rights = zeros(Int, nblocks) @threads for bid = 1:nblocks - split_set_chunk!(left, right, 𝑖, bid, nblocks, X_bin, feat, cond_bin, offset, chunk_size, lefts, rights) + split_set_chunk!( + left, + right, + 𝑖, + bid, + nblocks, + X_bin, + feat, + cond_bin, + offset, + chunk_size, + lefts, + rights, + ) end sum_lefts = sum(lefts) cumsum_lefts = cumsum(lefts) cumsum_rights = cumsum(rights) @threads for bid = 1:nblocks - split_views_kernel!(out, left, right, bid, offset, chunk_size, lefts, rights, sum_lefts, cumsum_lefts, cumsum_rights) + split_views_kernel!( + out, + left, + right, + bid, + offset, + chunk_size, + lefts, + rights, + sum_lefts, + cumsum_lefts, + cumsum_rights, + ) end - return (view(out, offset+1:offset+sum_lefts), view(out, offset+sum_lefts+1:offset+length(𝑖))) + return ( + view(out, offset+1:offset+sum_lefts), + view(out, offset+sum_lefts+1:offset+length(𝑖)), + ) end @@ -106,8 +168,9 @@ function update_hist!( δ𝑀::Matrix{T}, X_bin::Matrix{UInt8}, 𝑖::AbstractVector{S}, - 𝑗::AbstractVector{S}, K) where {L<:GradientRegression,T,S} - + 𝑗::AbstractVector{S}, + K, +) where {L<:GradientRegression,T,S} @threads for j in 𝑗 @inbounds @simd for i in 𝑖 hid = 3 * X_bin[i, j] - 2 @@ -129,8 +192,9 @@ function update_hist!( δ𝑀::Matrix{T}, X_bin::Matrix{UInt8}, 𝑖::AbstractVector{S}, - 𝑗::AbstractVector{S}, K) where {L<:MLE2P,T,S} - + 𝑗::AbstractVector{S}, + K, +) where {L<:MLE2P,T,S} @threads for j in 𝑗 @inbounds @simd for i in 𝑖 hid = 5 * X_bin[i, j] - 4 @@ -154,12 +218,13 @@ function update_hist!( δ𝑀::Matrix{T}, X_bin::Matrix{UInt8}, 𝑖::AbstractVector{S}, - 𝑗::AbstractVector{S}, K) where {L,T,S} - + 𝑗::AbstractVector{S}, + K, +) where {L,T,S} @threads for j in 𝑗 @inbounds for i in 𝑖 hid = (2 * K + 1) * (X_bin[i, j] - 1) - for k in 1:(2*K+1) + for k = 1:(2*K+1) hist[j][hid+k] += δ𝑀[k, i] end end @@ -180,24 +245,35 @@ Generic fallback function update_gains!( node::TrainNode, 𝑗::Vector, - params::EvoTypes{L,T,S}, K, monotone_constraints) where {L,T,S} + params::EvoTypes{L,T,S}, + K, + monotone_constraints, +) where {L,T,S} KK = 2 * K + 1 @inbounds @threads for j in 𝑗 - @inbounds for k in 1:KK + @inbounds for k = 1:KK node.hL[j][k] = node.h[j][k] node.hR[j][k] = node.βˆ‘[k] - node.h[j][k] end - @inbounds for bin in 2:params.nbins + @inbounds for bin = 2:params.nbins @inbounds for k = 1:KK binid = KK * (bin - 1) node.hL[j][binid+k] = node.hL[j][binid-KK+k] + node.h[j][binid+k] node.hR[j][binid+k] = node.hR[j][binid-KK+k] - node.h[j][binid+k] end end - hist_gains_cpu!(L, view(node.gains, :, j), node.hL[j], node.hR[j], params, K, monotone_constraints[j]) + hist_gains_cpu!( + L, + view(node.gains, :, j), + node.hL[j], + node.hR[j], + params, + K, + monotone_constraints[j], + ) end return nothing end @@ -207,8 +283,16 @@ end hist_gains_cpu! 
GradientRegression """ -function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L<:GradientRegression,T} - @inbounds for bin in 1:params.nbins +function hist_gains_cpu!( + ::Type{L}, + gains::AbstractVector{T}, + hL::Vector{T}, + hR::Vector{T}, + params, + K, + monotone_constraint, +) where {L<:GradientRegression,T} + @inbounds for bin = 1:params.nbins i = 3 * bin - 2 # update gain only if there's non null weight on each of left and right side - except for nbins level, which is used as benchmark for split criteria (gain if no split) if bin == params.nbins @@ -219,8 +303,11 @@ function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR: if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) - gains[bin] = (hL[i]^2 / (hL[i+1] + params.lambda * hL[i+2]) + - hR[i]^2 / (hR[i+1] + params.lambda * hR[i+2])) / 2 + gains[bin] = + ( + hL[i]^2 / (hL[i+1] + params.lambda * hL[i+2]) + + hR[i]^2 / (hR[i+1] + params.lambda * hR[i+2]) + ) / 2 end end end @@ -231,8 +318,16 @@ end hist_gains_cpu! QuantileRegression/L1Regression """ -function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L<:Union{QuantileRegression,L1Regression},T} - @inbounds for bin in 1:params.nbins +function hist_gains_cpu!( + ::Type{L}, + gains::AbstractVector{T}, + hL::Vector{T}, + hR::Vector{T}, + params, + K, + monotone_constraint, +) where {L<:Union{QuantileRegression,L1Regression},T} + @inbounds for bin = 1:params.nbins i = 3 * bin - 2 # update gain only if there's non null weight on each of left and right side - except for nbins level, which is used as benchmark for split criteria (gain if no split) if bin == params.nbins @@ -248,23 +343,39 @@ end hist_gains_cpu! MLE2P """ -function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L<:MLE2P,T} - @inbounds for bin in 1:params.nbins +function hist_gains_cpu!( + ::Type{L}, + gains::AbstractVector{T}, + hL::Vector{T}, + hR::Vector{T}, + params, + K, + monotone_constraint, +) where {L<:MLE2P,T} + @inbounds for bin = 1:params.nbins i = 5 * bin - 4 # update gain only if there's non null weight on each of left and right side - except for nbins level, which is used as benchmark for split criteria (gain if no split) if bin == params.nbins - gains[bin] = (hL[i]^2 / (hL[i+2] + params.lambda * hL[i+4]) + - hL[i+1]^2 / (hL[i+3] + params.lambda * hL[i+4])) / 2 + gains[bin] = + ( + hL[i]^2 / (hL[i+2] + params.lambda * hL[i+4]) + + hL[i+1]^2 / (hL[i+3] + params.lambda * hL[i+4]) + ) / 2 elseif hL[i+4] > params.min_weight && hR[i+4] > params.min_weight predL = pred_scalar_cpu!(hL[i:i+4], params, K) predR = pred_scalar_cpu!(hR[i:i+4], params, K) if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) - gains[bin] = (hL[i]^2 / (hL[i+2] + params.lambda * hL[i+4]) + - hR[i]^2 / (hR[i+2] + params.lambda * hR[i+4])) / 2 + - (hL[i+1]^2 / (hL[i+3] + params.lambda * hL[i+4]) + - hR[i+1]^2 / (hR[i+3] + params.lambda * hR[i+4])) / 2 + gains[bin] = + ( + hL[i]^2 / (hL[i+2] + params.lambda * hL[i+4]) + + hR[i]^2 / (hR[i+2] + params.lambda * hR[i+4]) + ) / 2 + + ( + hL[i+1]^2 / (hL[i+3] + params.lambda * hL[i+4]) + + hR[i+1]^2 / (hR[i+3] + params.lambda * hR[i+4]) + ) / 2 end end end @@ -275,8 +386,16 @@ end hist_gains_cpu! 
Generic """ -function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR::Vector{T}, params, K, monotone_constraint) where {L,T} - @inbounds for bin in 1:params.nbins +function hist_gains_cpu!( + ::Type{L}, + gains::AbstractVector{T}, + hL::Vector{T}, + hR::Vector{T}, + params, + K, + monotone_constraint, +) where {L,T} + @inbounds for bin = 1:params.nbins i = (2 * K + 1) * (bin - 1) # update gain only if there's non null weight on each of left and right side - except for nbins level, which is used as benchmark for split criteria (gain if no split) if bin == params.nbins @@ -290,11 +409,17 @@ function hist_gains_cpu!(::Type{L}, gains::AbstractVector{T}, hL::Vector{T}, hR: elseif hL[i+2*K+1] > params.min_weight && hR[i+2*K+1] > params.min_weight for k = 1:K if k == 1 - gains[bin] = (hL[i+k]^2 / (hL[i+k+K] + params.lambda * hL[i+2*K+1]) + - hR[i+k]^2 / (hR[i+k+K] + params.lambda * hR[i+2*K+1])) / 2 + gains[bin] = + ( + hL[i+k]^2 / (hL[i+k+K] + params.lambda * hL[i+2*K+1]) + + hR[i+k]^2 / (hR[i+k+K] + params.lambda * hR[i+2*K+1]) + ) / 2 else - gains[bin] += (hL[i+k]^2 / (hL[i+k+K] + params.lambda * hL[i+2*K+1]) + - hR[i+k]^2 / (hR[i+k+K] + params.lambda * hR[i+2*K+1])) / 2 + gains[bin] += + ( + hL[i+k]^2 / (hL[i+k+K] + params.lambda * hL[i+2*K+1]) + + hR[i+k]^2 / (hR[i+k+K] + params.lambda * hR[i+2*K+1]) + ) / 2 end end end diff --git a/src/fit.jl b/src/fit.jl index 66e63319..fe7ff45a 100644 --- a/src/fit.jl +++ b/src/fit.jl @@ -4,104 +4,104 @@ Initialise EvoTree """ function init_evotree( - params::EvoTypes{L,T,S}, - X::AbstractMatrix, - Y::AbstractVector, - W = nothing, - offset = nothing; + params::EvoTypes{L,T,S}; + x_train::AbstractMatrix, + y_train::AbstractVector, + w_train = nothing, + offset_train = nothing, fnames = nothing, ) where {L,T,S} K = 1 levels = nothing - X = convert(Matrix{T}, X) - + x = convert(Matrix{T}, x_train) + offset = !isnothing(offset_train) ? 
T.(offset_train) : nothing if L == Logistic - Y = T.(Y) - ΞΌ = [logit(mean(Y))] + y = T.(y_train) + ΞΌ = [logit(mean(y))] !isnothing(offset) && (offset .= logit.(offset)) elseif L in [Poisson, Gamma, Tweedie] - Y = T.(Y) - ΞΌ = fill(log(mean(Y)), 1) + y = T.(y_train) + ΞΌ = fill(log(mean(y)), 1) !isnothing(offset) && (offset .= log.(offset)) elseif L == Softmax - if eltype(Y) <: CategoricalValue - levels = CategoricalArrays.levels(Y) + if eltype(y_train) <: CategoricalValue + levels = CategoricalArrays.levels(y_train) K = length(levels) ΞΌ = zeros(T, K) - Y = UInt32.(CategoricalArrays.levelcode.(Y)) + y = UInt32.(CategoricalArrays.levelcode.(y_train)) else - levels = sort(unique(Y)) - yc = CategoricalVector(Y, levels = levels) + levels = sort(unique(y_train)) + yc = CategoricalVector(y_train, levels = levels) K = length(levels) ΞΌ = zeros(T, K) - Y = UInt32.(CategoricalArrays.levelcode.(yc)) + y = UInt32.(CategoricalArrays.levelcode.(yc)) end !isnothing(offset) && (offset .= log.(offset)) elseif L == GaussianDist K = 2 - Y = T.(Y) - ΞΌ = [mean(Y), log(std(Y))] + y = T.(y_train) + ΞΌ = [mean(y), log(std(y))] !isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2])) elseif L == LogisticDist K = 2 - Y = T.(Y) - ΞΌ = [mean(Y), log(std(Y) * sqrt(3) / Ο€)] + y = T.(y_train) + ΞΌ = [mean(y), log(std(y) * sqrt(3) / Ο€)] !isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2])) else - Y = T.(Y) - ΞΌ = [mean(Y)] + y = T.(y_train) + ΞΌ = [mean(y)] end ΞΌ = T.(ΞΌ) # force a neutral bias/initial tree when offset is specified !isnothing(offset) && (ΞΌ .= 0) # initialize preds - X_size = size(X) - pred = zeros(T, K, X_size[1]) - @inbounds for i = 1:X_size[1] + x_size = size(x) + pred = zeros(T, K, x_size[1]) + @inbounds for i = 1:x_size[1] pred[:, i] .= ΞΌ end !isnothing(offset) && (pred .+= offset') # init GBTree bias = [Tree{L,T}(ΞΌ)] - fnames = isnothing(fnames) ? ["feat_$i" for i in axes(X, 2)] : string.(fnames) - @assert length(fnames) == size(X, 2) + fnames = isnothing(fnames) ? ["feat_$i" for i in axes(x, 2)] : string.(fnames) + @assert length(fnames) == size(x, 2) info = Dict(:fnames => fnames, :levels => levels) evotree = GBTree{L,T,S}(bias, params, Metric(), K, info) # initialize gradients and weights - δ𝑀 = zeros(T, 2 * K + 1, X_size[1]) - W = isnothing(W) ? ones(T, size(Y)) : Vector{T}(W) - @assert (length(Y) == length(W) && minimum(W) > 0) - δ𝑀[end, :] .= W + δ𝑀 = zeros(T, 2 * K + 1, x_size[1]) + w = isnothing(w_train) ? 
ones(T, size(y)) : Vector{T}(w_train) + @assert (length(y) == length(w) && minimum(w) > 0) + δ𝑀[end, :] .= w # binarize data into quantiles - edges = get_edges(X, params.nbins) - X_bin = binarize(X, edges) + edges = get_edges(x, params.nbins) + x_bin = binarize(x, edges) - 𝑖_ = UInt32.(collect(1:X_size[1])) - 𝑗_ = UInt32.(collect(1:X_size[2])) - 𝑗 = zeros(eltype(𝑗_), ceil(Int, params.colsample * X_size[2])) + 𝑖_ = UInt32.(collect(1:x_size[1])) + 𝑗_ = UInt32.(collect(1:x_size[2])) + 𝑗 = zeros(eltype(𝑗_), ceil(Int, params.colsample * x_size[2])) # initialize histograms - nodes = [TrainNode(X_size[2], params.nbins, K, T) for n = 1:2^params.max_depth-1] - nodes[1].𝑖 = zeros(eltype(𝑖_), ceil(Int, params.rowsample * X_size[1])) + nodes = [TrainNode(x_size[2], params.nbins, K, T) for n = 1:2^params.max_depth-1] + nodes[1].𝑖 = zeros(eltype(𝑖_), ceil(Int, params.rowsample * x_size[1])) out = zeros(UInt32, length(nodes[1].𝑖)) left = zeros(UInt32, length(nodes[1].𝑖)) right = zeros(UInt32, length(nodes[1].𝑖)) # assign monotone contraints in constraints vector - monotone_constraints = zeros(Int32, X_size[2]) + monotone_constraints = zeros(Int32, x_size[2]) hasproperty(params, :monotone_constraint) && for (k, v) in params.monotone_constraints monotone_constraints[k] = v end cache = ( params = deepcopy(params), - X = X, - Y = Y, + x = x, + y = y, K = K, nodes = nodes, pred = pred, @@ -113,7 +113,7 @@ function init_evotree( right = right, δ𝑀 = δ𝑀, edges = edges, - X_bin = X_bin, + x_bin = x_bin, monotone_constraints = monotone_constraints, ) @@ -136,7 +136,7 @@ function grow_evotree!(evotree::GBTree{L,T,S}, cache) where {L,T,S} sample!(params.rng, cache.𝑗_, cache.𝑗, replace = false, ordered = true) # build a new tree - update_grads!(L, cache.δ𝑀, cache.pred, cache.Y; alpha = params.alpha) + update_grads!(L, cache.δ𝑀, cache.pred, cache.y; alpha = params.alpha) # assign a root and grow tree tree = Tree{L,T}(params.max_depth, evotree.K, zero(T)) grow_tree!( @@ -149,12 +149,12 @@ function grow_evotree!(evotree::GBTree{L,T,S}, cache) where {L,T,S} cache.out, cache.left, cache.right, - cache.X_bin, + cache.x_bin, cache.K, cache.monotone_constraints, ) push!(evotree.trees, tree) - predict!(cache.pred, tree, cache.X, cache.K) + predict!(cache.pred, tree, cache.x, cache.K) end # end of nrounds cache.params.nrounds = params.nrounds @@ -172,7 +172,7 @@ function grow_tree!( out, left, right, - X_bin::AbstractMatrix, + x_bin::AbstractMatrix, K, monotone_constraints, ) where {L,T,S} @@ -207,7 +207,7 @@ function grow_tree!( nodes[n].h .= nodes[n>>1].h .- nodes[n-1].h end else - update_hist!(L, nodes[n].h, δ𝑀, X_bin, nodes[n].𝑖, 𝑗, K) + update_hist!(L, nodes[n].h, δ𝑀, x_bin, nodes[n].𝑖, 𝑗, K) end end end @@ -236,7 +236,7 @@ function grow_tree!( left, right, nodes[n].𝑖, - X_bin, + x_bin, tree.feat[n], tree.cond_bin[n], offset, @@ -329,9 +329,9 @@ function fit_evotree( if params.device == "gpu" model, cache = - init_evotree_gpu(params, x_train, y_train, w_train, offset_train; fnames) + init_evotree_gpu(params; x_train, y_train, w_train, offset_train, fnames) else - model, cache = init_evotree(params, x_train, y_train, w_train, offset_train; fnames) + model, cache = init_evotree(params; x_train, y_train, w_train, offset_train, fnames) end if !isnothing(offset_eval) @@ -346,14 +346,14 @@ function fit_evotree( if !isnothing(metric) && !isnothing(x_eval) && !isnothing(y_eval) if params.device == "gpu" x_eval = CuArray(T.(x_eval)) - y_eval = CuArray(eltype(cache.Y).(y_eval)) + y_eval = CuArray(eltype(cache.y).(y_eval)) w_eval = 
isnothing(w_eval) ? CUDA.ones(T, size(y_eval)) : CuArray(T.(w_eval)) p_eval = predict(model.trees[1], x_eval, model.K) !isnothing(offset_eval) && (p_eval .+= CuArray(offset_eval')) eval_vec = CUDA.zeros(T, size(y_eval, 1)) else # params.device == "cpu" x_eval = T.(x_eval) - y_eval = eltype(cache.Y).(y_eval) + y_eval = eltype(cache.y).(y_eval) w_eval = isnothing(w_eval) ? ones(T, size(y_eval)) : T.(w_eval) p_eval = predict(model.trees[1], x_eval, model.K) !isnothing(offset_eval) && (p_eval .+= offset_eval') diff --git a/src/gpu/fit_gpu.jl b/src/gpu/fit_gpu.jl index 0f9aa8e4..7dbb2f6f 100644 --- a/src/gpu/fit_gpu.jl +++ b/src/gpu/fit_gpu.jl @@ -1,80 +1,97 @@ -function init_evotree_gpu(params::EvoTypes{L,T,S}, - X::AbstractMatrix, Y::AbstractVector, W=nothing, offset=nothing; fnames=nothing) where {L,T,S} +function init_evotree_gpu( + params::EvoTypes{L,T,S}; + x_train::AbstractMatrix, + y_train::AbstractVector, + w_train = nothing, + offset_train = nothing, + fnames = nothing, +) where {L,T,S} K = 1 levels = nothing - X = convert(Matrix{T}, X) + x = convert(Matrix{T}, x_train) + offset = !isnothing(offset_train) ? T.(offset_train) : nothing if L == Logistic - Y = CuArray(T.(Y)) - ΞΌ = [logit(mean(Y))] + y = CuArray(T.(y_train)) + ΞΌ = [logit(mean(y))] !isnothing(offset) && (offset .= logit.(offset)) elseif L ∈ [Poisson, Gamma, Tweedie] - Y = CuArray(T.(Y)) - ΞΌ = fill(log(mean(Y)), 1) + y = CuArray(T.(y_train)) + ΞΌ = fill(log(mean(y)), 1) !isnothing(offset) && (offset .= log.(offset)) elseif L == GaussianDist K = 2 - Y = CuArray(T.(Y)) - ΞΌ = [mean(Y), log(std(Y))] + y = CuArray(T.(y_train)) + ΞΌ = [mean(y), log(std(y))] !isnothing(offset) && (offset[:, 2] .= log.(offset[:, 2])) else - Y = CuArray(T.(Y)) - ΞΌ = [mean(Y)] + y = CuArray(T.(y_train)) + ΞΌ = [mean(y)] end # force a neutral bias/initial tree when offset is specified !isnothing(offset) && (ΞΌ .= 0) # initialize preds - X_size = size(X) - pred = CUDA.zeros(T, K, X_size[1]) + x_size = size(x) + pred = CUDA.zeros(T, K, x_size[1]) pred .= CuArray(ΞΌ) !isnothing(offset) && (pred .+= CuArray(offset')) # init GBTree bias = [TreeGPU{L,T}(CuArray(ΞΌ))] - fnames = isnothing(fnames) ? ["feat_$i" for i in axes(X, 2)] : string.(fnames) - @assert length(fnames) == size(X, 2) + fnames = isnothing(fnames) ? ["feat_$i" for i in axes(x, 2)] : string.(fnames) + @assert length(fnames) == size(x, 2) info = Dict(:fnames => fnames, :levels => levels) evotree = GBTreeGPU{L,T,S}(bias, params, Metric(), K, info) # initialize gradients and weights - δ𝑀 = CUDA.zeros(T, 2 * K + 1, X_size[1]) - W = isnothing(W) ? CUDA.ones(T, size(Y)) : CuVector{T}(W) - @assert (length(Y) == length(W) && minimum(W) > 0) - δ𝑀[end, :] .= W + δ𝑀 = CUDA.zeros(T, 2 * K + 1, x_size[1]) + w = isnothing(w_train) ? 
CUDA.ones(T, size(y)) : CuVector{T}(w_train)
+    @assert (length(y) == length(w) && minimum(w) > 0)
+    δ𝑀[end, :] .= w

     # binarize data into quantiles
-    edges = get_edges(X, params.nbins)
-    X_bin = CuArray(binarize(X, edges))
+    edges = get_edges(x, params.nbins)
+    x_bin = CuArray(binarize(x, edges))

-    𝑖_ = UInt32.(collect(1:X_size[1]))
-    𝑗_ = UInt32.(collect(1:X_size[2]))
-    𝑗 = zeros(eltype(𝑗_), ceil(Int, params.colsample * X_size[2]))
+    𝑖_ = UInt32.(collect(1:x_size[1]))
+    𝑗_ = UInt32.(collect(1:x_size[2]))
+    𝑗 = zeros(eltype(𝑗_), ceil(Int, params.colsample * x_size[2]))

     # initialize histograms
-    nodes = [TrainNodeGPU(X_size[2], params.nbins, K, T) for n = 1:2^params.max_depth-1]
-    nodes[1].𝑖 = CUDA.zeros(eltype(𝑖_), ceil(Int, params.rowsample * X_size[1]))
+    nodes = [TrainNodeGPU(x_size[2], params.nbins, K, T) for n = 1:2^params.max_depth-1]
+    nodes[1].𝑖 = CUDA.zeros(eltype(𝑖_), ceil(Int, params.rowsample * x_size[1]))
     out = CUDA.zeros(UInt32, length(nodes[1].𝑖))
     left = CUDA.zeros(UInt32, length(nodes[1].𝑖))
     right = CUDA.zeros(UInt32, length(nodes[1].𝑖))

     # assign monotone constraints in the constraints vector
-    monotone_constraints = zeros(Int32, X_size[2])
+    monotone_constraints = zeros(Int32, x_size[2])
     hasproperty(params, :monotone_constraints) &&
         for (k, v) in params.monotone_constraints
             monotone_constraints[k] = v
         end

     # store cache
-    cache = (params=deepcopy(params),
-        X=CuArray(X), X_bin=X_bin, Y=Y, K=K,
-        nodes=nodes,
-        pred=pred,
-        𝑖_=𝑖_, 𝑗_=𝑗_, 𝑗=𝑗, 𝑖=Array(nodes[1].𝑖),
-        out=out, left=left, right=right,
-        δ𝑀=δ𝑀,
-        edges=edges,
-        monotone_constraints=CuArray(monotone_constraints))
+    cache = (
+        params = deepcopy(params),
+        x = CuArray(x),
+        x_bin = x_bin,
+        y = y,
+        K = K,
+        nodes = nodes,
+        pred = pred,
+        𝑖_ = 𝑖_,
+        𝑗_ = 𝑗_,
+        𝑗 = 𝑗,
+        𝑖 = Array(nodes[1].𝑖),
+        out = out,
+        left = left,
+        right = right,
+        δ𝑀 = δ𝑀,
+        edges = edges,
+        monotone_constraints = CuArray(monotone_constraints),
+    )

     cache.params.nrounds = 0
@@ -86,24 +103,36 @@ function grow_evotree!(evotree::GBTreeGPU{L,T,S}, cache) where {L,T,S}

     # initialize from cache
     params = evotree.params
-    X_size = size(cache.X_bin)
     Ξ΄nrounds = params.nrounds - cache.params.nrounds

     # loop over nrounds
     for i = 1:Ξ΄nrounds
         # select random rows and cols
-        sample!(params.rng, cache.𝑖_, cache.𝑖, replace=false, ordered=true)
-        sample!(params.rng, cache.𝑗_, cache.𝑗, replace=false, ordered=true)
+        sample!(params.rng, cache.𝑖_, cache.𝑖, replace = false, ordered = true)
+        sample!(params.rng, cache.𝑗_, cache.𝑗, replace = false, ordered = true)
         cache.nodes[1].𝑖 .= CuArray(cache.𝑖)

         # build a new tree
-        update_grads_gpu!(L, cache.δ𝑀, cache.pred, cache.Y)
+        update_grads_gpu!(L, cache.δ𝑀, cache.pred, cache.y)
         # # assign a root and grow tree
         tree = TreeGPU{L,T}(params.max_depth, evotree.K, zero(T))
-        grow_tree_gpu!(tree, cache.nodes, params, cache.δ𝑀, cache.edges, CuVector(cache.𝑗), cache.out, cache.left, cache.right, cache.X_bin, cache.K, cache.monotone_constraints)
+        grow_tree_gpu!(
+            tree,
+            cache.nodes,
+            params,
+            cache.δ𝑀,
+            cache.edges,
+            CuVector(cache.𝑗),
+            cache.out,
+            cache.left,
+            cache.right,
+            cache.x_bin,
+            cache.K,
+            cache.monotone_constraints,
+        )
         push!(evotree.trees, tree)
         # update predictions
-        predict!(cache.pred, tree, cache.X, cache.K)
+        predict!(cache.pred, tree, cache.x, cache.K)
     end # end of nrounds
     cache.params.nrounds = params.nrounds
     CUDA.reclaim()
@@ -117,8 +146,14 @@ function grow_tree_gpu!(
     params::EvoTypes{L,T,S},
     δ𝑀::AbstractMatrix,
     edges,
-    𝑗, out, left, right,
-    X_bin::AbstractMatrix, K, monotone_constraints) where {L,T,S}
+    𝑗,
+    out,
+    left,
+    right,
+    x_bin::AbstractMatrix,
+    K,
+    monotone_constraints,
+) where {L,T,S}

     n_next = [1]
     n_current = copy(n_next)
@@ -133,7 +168,7 @@ function grow_tree_gpu!(
     end

     # initialize summary stats
-    nodes[1].βˆ‘ .= vec(sum(δ𝑀[:, nodes[1].𝑖], dims=2))
+    nodes[1].βˆ‘ .= vec(sum(δ𝑀[:, nodes[1].𝑖], dims = 2))
     nodes[1].gain = get_gain(L, Array(nodes[1].βˆ‘), params.lambda, K) # should use a GPU version?

     # grow while there are remaining active nodes - TODO: histogram subtraction hits issue on GPU
@@ -151,14 +186,15 @@
             CUDA.synchronize()
         end
     else
-        update_hist_gpu!(L, nodes[n].h, δ𝑀, X_bin, nodes[n].𝑖, 𝑗, K)
+        update_hist_gpu!(L, nodes[n].h, δ𝑀, x_bin, nodes[n].𝑖, 𝑗, K)
     end
 end
 end

 # grow while there are remaining active nodes
 for n ∈ sort(n_current)
-    if depth == params.max_depth || @allowscalar(nodes[n].βˆ‘[end] <= params.min_weight)
+    if depth == params.max_depth ||
+       @allowscalar(nodes[n].βˆ‘[end] <= params.min_weight)
         pred_leaf_gpu!(tree.pred, n, Array(nodes[n].βˆ‘), params)
     else
         update_gains_gpu!(nodes[n], 𝑗, params, K, monotone_constraints)
@@ -177,14 +213,24 @@
             pred_leaf_gpu!(tree.pred, n, Array(nodes[n].βˆ‘), params)
             popfirst!(n_next)
         else
-            _left, _right = split_set_threads_gpu!(out, left, right, @allowscalar(nodes[n]).𝑖, X_bin, @allowscalar(tree.feat[n]), @allowscalar(tree.cond_bin[n]), offset)
+            _left, _right = split_set_threads_gpu!(
+                out,
+                left,
+                right,
+                @allowscalar(nodes[n]).𝑖,
+                x_bin,
+                @allowscalar(tree.feat[n]),
+                @allowscalar(tree.cond_bin[n]),
+                offset,
+            )
             nodes[n<<1].𝑖, nodes[n<<1+1].𝑖 = _left, _right
             offset += length(nodes[n].𝑖)
             # println("length(_left): ", length(_left), " | length(_right): ", length(_right))
             # println("best: ", best)
             update_childs_βˆ‘_gpu!(L, nodes, n, best[2][1], best[2][2])
             nodes[n<<1].gain = get_gain(L, Array(nodes[n<<1].βˆ‘), params.lambda, K)
-            nodes[n<<1+1].gain = get_gain(L, Array(nodes[n<<1+1].βˆ‘), params.lambda, K)
+            nodes[n<<1+1].gain =
+                get_gain(L, Array(nodes[n<<1+1].βˆ‘), params.lambda, K)

             if length(_right) >= length(_left)
                 push!(n_next, n << 1)
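(Aside — a minimal sketch of the keyword-based training flow these diffs converge on. Illustrative only: the data and hyper-parameters below are placeholders, and the same calls run on GPU with `device = "gpu"` given a functional CUDA setup.)

using EvoTrees

x_train = randn(1_000, 10)
y_train = randn(1_000)

params = EvoTreeRegressor(T = Float32, loss = :linear, nrounds = 10, max_depth = 4)

# positional (params, X, Y) becomes (params; x_train, y_train, ...) throughout:
model, cache = EvoTrees.init_evotree(params; x_train, y_train)
EvoTrees.grow_evotree!(model, cache)
pred = EvoTrees.predict(model, x_train)
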
diff --git a/src/importance.jl b/src/importance.jl
index e90080dc..0e2ee95a 100644
--- a/src/importance.jl
+++ b/src/importance.jl
@@ -23,7 +23,7 @@ function importance(model::Union{GBTree,GBTreeGPU})
     gain .= gain ./ sum(gain)

     pairs = collect(Dict(zip(string.(fnames), gain)))
-    sort!(pairs, by=x -> -x[2])
+    sort!(pairs, by = x -> -x[2])
     return pairs
 end
diff --git a/src/loss.jl b/src/loss.jl
index 2dbde1b8..a211262c 100644
--- a/src/loss.jl
+++ b/src/loss.jl
@@ -39,21 +39,24 @@ function update_grads!(::Type{Tweedie}, δ𝑀::Matrix, p::Matrix, y::Vector; kw
     @inbounds for i in eachindex(y)
         pred = exp(p[1, i])
         δ𝑀[1, i] = 2 * (pred^(2 - rho) - y[i] * pred^(1 - rho)) * δ𝑀[3, i]
-        δ𝑀[2, i] = 2 * ((2 - rho) * pred^(2 - rho) - (1 - rho) * y[i] * pred^(1 - rho)) * δ𝑀[3, i]
+        δ𝑀[2, i] =
+            2 * ((2 - rho) * pred^(2 - rho) - (1 - rho) * y[i] * pred^(1 - rho)) * δ𝑀[3, i]
     end
 end

 # L1
 function update_grads!(::Type{L1}, δ𝑀::Matrix, p::Matrix, y::Vector; alpha, kwargs...)
     @inbounds for i in eachindex(y)
-        δ𝑀[1, i] = (alpha * max(y[i] - p[1, i], 0) - (1 - alpha) * max(p[1, i] - y[i], 0)) * δ𝑀[3, i]
+        δ𝑀[1, i] =
+            (alpha * max(y[i] - p[1, i], 0) - (1 - alpha) * max(p[1, i] - y[i], 0)) *
+            δ𝑀[3, i]
     end
 end

 # Softmax
 function update_grads!(::Type{Softmax}, δ𝑀::Matrix, p::Matrix, y::Vector; kwargs...)
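    # note: the max-subtraction below is the standard numerically stable softmax;
    # exp.(p .- m) ./ sum(exp.(p .- m)) equals exp.(p) ./ sum(exp.(p)) exactly,
    # while keeping exp from overflowing for large raw scores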
- p .= p .- maximum(p, dims=1) - sums = sum(exp.(p), dims=1) + p .= p .- maximum(p, dims = 1) + sums = sum(exp.(p), dims = 1) K = (size(δ𝑀, 1) - 1) Γ· 2 for i in eachindex(y) for k = 1:K @@ -99,10 +102,22 @@ function update_grads!(::Type{LogisticDist}, δ𝑀::Matrix, p::Matrix, y::Vecto @inbounds @simd for i in eachindex(y) # first order δ𝑀[1, i] = -tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) * exp(-p[2, i]) * δ𝑀[5, i] - δ𝑀[2, i] = -(exp(-p[2, i]) * (y[i] - p[1, i]) * tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) - 1) * δ𝑀[5, i] + δ𝑀[2, i] = + -( + exp(-p[2, i]) * + (y[i] - p[1, i]) * + tanh((y[i] - p[1, i]) / (2 * exp(p[2, i]))) - 1 + ) * δ𝑀[5, i] # second order - δ𝑀[3, i] = sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) * δ𝑀[5, i] - δ𝑀[4, i] = (exp(-2 * p[2, i]) * (p[1, i] - y[i]) * (p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i])))) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * δ𝑀[5, i] + δ𝑀[3, i] = + sech((y[i] - p[1, i]) / (2 * exp(p[2, i])))^2 / (2 * exp(2 * p[2, i])) * + δ𝑀[5, i] + δ𝑀[4, i] = + ( + exp(-2 * p[2, i]) * + (p[1, i] - y[i]) * + (p[1, i] - y[i] + exp(p[2, i]) * sinh(exp(-p[2, i]) * (p[1, i] - y[i]))) + ) / (1 + cosh(exp(-p[2, i]) * (p[1, i] - y[i]))) * δ𝑀[5, i] end end @@ -132,7 +147,12 @@ end # get the gain metric ############################## # GradientRegression -function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:GradientRegression,T<:AbstractFloat} +function get_gain( + ::Type{L}, + βˆ‘::Vector{T}, + Ξ»::T, + K, +) where {L<:GradientRegression,T<:AbstractFloat} βˆ‘[1]^2 / (βˆ‘[2] + Ξ» * βˆ‘[3]) / 2 end @@ -142,7 +162,12 @@ function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:MLE2P,T<:Abstra end # MultiClassRegression -function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:MultiClassRegression,T<:AbstractFloat} +function get_gain( + ::Type{L}, + βˆ‘::Vector{T}, + Ξ»::T, + K, +) where {L<:MultiClassRegression,T<:AbstractFloat} gain = zero(T) @inbounds for k = 1:K gain += βˆ‘[k]^2 / (βˆ‘[k+K] + Ξ» * βˆ‘[2*K+1]) / 2 @@ -151,7 +176,12 @@ function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:MultiClassRegre end # QuantileRegression -function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:QuantileRegression,T<:AbstractFloat} +function get_gain( + ::Type{L}, + βˆ‘::Vector{T}, + Ξ»::T, + K, +) where {L<:QuantileRegression,T<:AbstractFloat} abs(βˆ‘[1]) end @@ -161,7 +191,14 @@ function get_gain(::Type{L}, βˆ‘::Vector{T}, Ξ»::T, K) where {L<:L1Regression,T< end -function update_childs_βˆ‘!(::Type{L}, nodes, n, bin, feat, K) where {L<:Union{GradientRegression,QuantileRegression,L1Regression}} +function update_childs_βˆ‘!( + ::Type{L}, + nodes, + n, + bin, + feat, + K, +) where {L<:Union{GradientRegression,QuantileRegression,L1Regression}} nodes[n<<1].βˆ‘ .= nodes[n].hL[feat][(3*bin-2):(3*bin)] nodes[n<<1+1].βˆ‘ .= nodes[n].hR[feat][(3*bin-2):(3*bin)] return nothing diff --git a/src/models.jl b/src/models.jl index 0682f062..90959401 100644 --- a/src/models.jl +++ b/src/models.jl @@ -30,9 +30,9 @@ mutable struct EvoTreeRegressor{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Det colsample::T nbins::S alpha::T - monotone_constraints - rng - device + monotone_constraints::Any + rng::Any + device::Any end function EvoTreeRegressor(; kwargs...) @@ -53,16 +53,18 @@ function EvoTreeRegressor(; kwargs...) 
:alpha => 0.5, :monotone_constraints => Dict{Int,Int}(), :rng => 123, - :device => "cpu" + :device => "cpu", ) args_ignored = setdiff(keys(kwargs), keys(args)) args_ignored_str = join(args_ignored, ", ") - length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." + length(args_ignored) > 0 && + @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." args_default = setdiff(keys(args), keys(kwargs)) args_default_str = join(args_default, ", ") - length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + length(args_default) > 0 && + @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." args_override = intersect(keys(args), keys(kwargs)) for arg in args_override @@ -86,7 +88,9 @@ function EvoTreeRegressor(; kwargs...) elseif args[:loss] == :quantile L = Quantile else - error("Invalid loss: $(args[:loss]). Only [`:linear`, `:logistic`, `:L1`, `:quantile`] are supported at the moment by EvoTreeRegressor.") + error( + "Invalid loss: $(args[:loss]). Only [`:linear`, `:logistic`, `:L1`, `:quantile`] are supported at the moment by EvoTreeRegressor.", + ) end model = EvoTreeRegressor{L,T,Int}( @@ -102,7 +106,8 @@ function EvoTreeRegressor(; kwargs...) T(args[:alpha]), args[:monotone_constraints], args[:rng], - args[:device]) + args[:device], + ) return model end @@ -119,9 +124,9 @@ mutable struct EvoTreeCount{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Probabi colsample::T nbins::S alpha::T - monotone_constraints - rng - device + monotone_constraints::Any + rng::Any + device::Any end function EvoTreeCount(; kwargs...) @@ -141,16 +146,18 @@ function EvoTreeCount(; kwargs...) :alpha => 0.5, :monotone_constraints => Dict{Int,Int}(), :rng => 123, - :device => "cpu" + :device => "cpu", ) args_ignored = setdiff(keys(kwargs), keys(args)) args_ignored_str = join(args_ignored, ", ") - length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." + length(args_ignored) > 0 && + @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." args_default = setdiff(keys(args), keys(kwargs)) args_default_str = join(args_default, ", ") - length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + length(args_default) > 0 && + @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." args_override = intersect(keys(args), keys(kwargs)) for arg in args_override @@ -174,7 +181,8 @@ function EvoTreeCount(; kwargs...) T(args[:alpha]), args[:monotone_constraints], args[:rng], - args[:device]) + args[:device], + ) return model end @@ -190,8 +198,8 @@ mutable struct EvoTreeClassifier{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Pr colsample::T nbins::S alpha::T - rng - device + rng::Any + device::Any end function EvoTreeClassifier(; kwargs...) @@ -210,16 +218,18 @@ function EvoTreeClassifier(; kwargs...) :nbins => 32, :alpha => 0.5, :rng => 123, - :device => "cpu" + :device => "cpu", ) args_ignored = setdiff(keys(kwargs), keys(args)) args_ignored_str = join(args_ignored, ", ") - length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." 
+ length(args_ignored) > 0 && + @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." args_default = setdiff(keys(args), keys(kwargs)) args_default_str = join(args_default, ", ") - length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + length(args_default) > 0 && + @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." args_override = intersect(keys(args), keys(kwargs)) for arg in args_override @@ -242,7 +252,8 @@ function EvoTreeClassifier(; kwargs...) args[:nbins], T(args[:alpha]), args[:rng], - args[:device]) + args[:device], + ) return model end @@ -258,9 +269,9 @@ mutable struct EvoTreeMLE{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Probabili colsample::T nbins::S alpha::T - monotone_constraints - rng - device + monotone_constraints::Any + rng::Any + device::Any end function EvoTreeMLE(; kwargs...) @@ -281,16 +292,18 @@ function EvoTreeMLE(; kwargs...) :alpha => 0.5, :monotone_constraints => Dict{Int,Int}(), :rng => 123, - :device => "cpu" + :device => "cpu", ) args_ignored = setdiff(keys(kwargs), keys(args)) args_ignored_str = join(args_ignored, ", ") - length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." + length(args_ignored) > 0 && + @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." args_default = setdiff(keys(args), keys(kwargs)) args_default_str = join(args_default, ", ") - length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + length(args_default) > 0 && + @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." args_override = intersect(keys(args), keys(kwargs)) for arg in args_override @@ -306,7 +319,9 @@ function EvoTreeMLE(; kwargs...) elseif args[:loss] == :logistic L = LogisticDist else - error("Invalid loss: $(args[:loss]). Only `:normal`, `:gaussian` and `:logistic` are supported at the moment by EvoTreeMLE.") + error( + "Invalid loss: $(args[:loss]). Only `:normal`, `:gaussian` and `:logistic` are supported at the moment by EvoTreeMLE.", + ) end model = EvoTreeMLE{L,T,Int}( @@ -322,7 +337,8 @@ function EvoTreeMLE(; kwargs...) T(args[:alpha]), args[:monotone_constraints], args[:rng], - args[:device]) + args[:device], + ) return model end @@ -339,9 +355,9 @@ mutable struct EvoTreeGaussian{L<:ModelType,T<:AbstractFloat,S<:Int} <: MMI.Prob colsample::T nbins::S alpha::T - monotone_constraints - rng - device + monotone_constraints::Any + rng::Any + device::Any end function EvoTreeGaussian(; kwargs...) @@ -360,16 +376,18 @@ function EvoTreeGaussian(; kwargs...) :alpha => 0.5, :monotone_constraints => Dict{Int,Int}(), :rng => 123, - :device => "cpu" + :device => "cpu", ) args_ignored = setdiff(keys(kwargs), keys(args)) args_ignored_str = join(args_ignored, ", ") - length(args_ignored) > 0 && @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." + length(args_ignored) > 0 && + @info "Following $(length(args_ignored)) provided arguments will be ignored: $(args_ignored_str)." 
args_default = setdiff(keys(args), keys(kwargs)) args_default_str = join(args_default, ", ") - length(args_default) > 0 && @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." + length(args_default) > 0 && + @info "Following $(length(args_default)) arguments were not provided and will be set to default: $(args_default_str)." args_override = intersect(keys(args), keys(kwargs)) for arg in args_override @@ -393,10 +411,17 @@ function EvoTreeGaussian(; kwargs...) T(args[:alpha]), args[:monotone_constraints], args[:rng], - args[:device]) + args[:device], + ) return model end # const EvoTypes = Union{EvoTreeRegressor,EvoTreeCount,EvoTreeClassifier,EvoTreeGaussian} -const EvoTypes{L,T,S} = Union{EvoTreeRegressor{L,T,S},EvoTreeCount{L,T,S},EvoTreeClassifier{L,T,S},EvoTreeGaussian{L,T,S},EvoTreeMLE{L,T,S}} \ No newline at end of file +const EvoTypes{L,T,S} = Union{ + EvoTreeRegressor{L,T,S}, + EvoTreeCount{L,T,S}, + EvoTreeClassifier{L,T,S}, + EvoTreeGaussian{L,T,S}, + EvoTreeMLE{L,T,S}, +} diff --git a/src/plot.jl b/src/plot.jl index e99cefd4..0f04dcaf 100644 --- a/src/plot.jl +++ b/src/plot.jl @@ -20,16 +20,21 @@ function get_adj_list(tree::EvoTrees.Tree) push!(adj, []) end end - return (map=map, adj=adj) + return (map = map, adj = adj) end function get_shapes(tree_layout) shapes = Vector(undef, length(tree_layout)) - for i = 1:length(tree_layout) + for i = eachindex(tree_layout) x, y = tree_layout[i][1], tree_layout[i][2] # center point x_buff = 0.45 y_buff = 0.45 - shapes[i] = [(x - x_buff, y + y_buff), (x + x_buff, y + y_buff), (x + x_buff, y - y_buff), (x - x_buff, y - y_buff)] + shapes[i] = [ + (x - x_buff, y + y_buff), + (x + x_buff, y + y_buff), + (x + x_buff, y - y_buff), + (x - x_buff, y - y_buff), + ] end return shapes end @@ -37,13 +42,15 @@ end function get_annotations(tree_layout, map, tree, var_names) # annotations = Vector{Tuple{Float64, Float64, String, Tuple}}(undef, length(tree_layout)) annotations = [] - for i = 1:length(tree_layout) + for i = eachindex(tree_layout) x, y = tree_layout[i][1], tree_layout[i][2] # center point if tree.split[map[i]] - feat = isnothing(var_names) ? "feat: " * string(tree.feat[map[i]]) : var_names[tree.feat[map[i]]] - txt = "$feat\n" * string(round(tree.cond_float[map[i]], sigdigits=3)) + feat = + isnothing(var_names) ? "feat: " * string(tree.feat[map[i]]) : + var_names[tree.feat[map[i]]] + txt = "$feat\n" * string(round(tree.cond_float[map[i]], sigdigits = 3)) else - txt = "pred:\n" * string(round(tree.pred[1, map[i]], sigdigits=3)) + txt = "pred:\n" * string(round(tree.pred[1, map[i]], sigdigits = 3)) end # annotations[i] = (x, y, txt, (9, :white, "helvetica")) push!(annotations, (x, y, txt, 10)) @@ -54,16 +61,22 @@ end function get_curves(adj, tree_layout, shapes) curves = [] num_curves = sum(length.(adj)) - for i = 1:length(adj) - for j = 1:length(adj[i]) + for i = eachindex(adj) + for j = eachindex(adj[i]) # curves is a length 2 tuple: (vector Xs, vector Ys) - push!(curves, ([tree_layout[i][1], tree_layout[adj[i][j]][1]], [shapes[i][3][2], shapes[adj[i][j]][1][2]])) + push!( + curves, + ( + [tree_layout[i][1], tree_layout[adj[i][j]][1]], + [shapes[i][3][2], shapes[adj[i][j]][1][2]], + ), + ) end end return curves end -@recipe function plot(tree::EvoTrees.Tree, var_names=nothing) +@recipe function plot(tree::EvoTrees.Tree, var_names = nothing) map, adj = EvoTrees.get_adj_list(tree) tree_layout = length(adj) == 1 ? 
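    # a single-node tree has no edges to lay out, so it gets a fixed origin point;
    # deeper trees use NetworkLayout's Buchheim tree layout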
[[0.0, 0.0]] : NetworkLayout.buchheim(adj) @@ -82,7 +95,7 @@ end size --> size annotations --> annotations - for i = 1:length(shapes) + for i = eachindex(shapes) @series begin fillcolor = length(adj[i]) == 0 ? "#84DCC6" : "#C8D3D5" fillcolor --> fillcolor @@ -91,7 +104,7 @@ end end end - for i = 1:length(curves) + for i = eachindex(curves) @series begin seriestype --> :curves return curves[i] @@ -99,7 +112,7 @@ end end end -@recipe function plot(model::EvoTrees.GBTree, n=1, var_names=nothing) +@recipe function plot(model::EvoTrees.GBTree, n = 1, var_names = nothing) isnothing(var_names) @@ -121,7 +134,7 @@ end size --> size annotations --> annotations - for i = 1:length(shapes) + for i = eachindex(shapes) @series begin fillcolor = length(adj[i]) == 0 ? "#84DCC6" : "#C8D3D5" fillcolor --> fillcolor @@ -130,7 +143,7 @@ end end end - for i = 1:length(curves) + for i = eachindex(curves) @series begin seriestype --> :curves return curves[i] diff --git a/src/predict.jl b/src/predict.jl index 415f1d48..f6d3c64e 100644 --- a/src/predict.jl +++ b/src/predict.jl @@ -2,7 +2,8 @@ function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:GradientRegress @inbounds @threads for i in axes(X, 1) nid = 1 @inbounds while tree.split[nid] - X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : nid = nid << 1 + 1 + X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : + nid = nid << 1 + 1 end @inbounds pred[1, i] += tree.pred[1, nid] end @@ -13,7 +14,8 @@ function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:Logistic,T} @inbounds @threads for i in axes(X, 1) nid = 1 @inbounds while tree.split[nid] - X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : nid = nid << 1 + 1 + X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : + nid = nid << 1 + 1 end @inbounds pred[1, i] = clamp(pred[1, i] + tree.pred[1, nid], -15, 15) end @@ -24,7 +26,8 @@ function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L<:MLE2P,T} @inbounds @threads for i in axes(X, 1) nid = 1 @inbounds while tree.split[nid] - X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : nid = nid << 1 + 1 + X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : + nid = nid << 1 + 1 end @inbounds pred[1, i] += tree.pred[1, nid] @inbounds pred[2, i] = max(-15, pred[2, i] + tree.pred[2, nid]) @@ -41,7 +44,8 @@ function predict!(pred::Matrix, tree::Tree{L,T}, X, K) where {L,T} @inbounds @threads for i in axes(X, 1) nid = 1 @inbounds while tree.split[nid] - X[i, tree.feat[nid]] < tree.cond_float[nid] ? nid = nid << 1 : nid = nid << 1 + 1 + X[i, tree.feat[nid]] < tree.cond_float[nid] ? 
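            # trees are stored heap-style: node nid's children sit at 2 * nid (left)
            # and 2 * nid + 1 (right), hence the bit shifts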
nid = nid << 1 : + nid = nid << 1 + 1 end @inbounds for k = 1:K pred[k, i] += tree.pred[k, nid] @@ -87,15 +91,35 @@ function predict(model::GBTree{L,T,S}, X::AbstractMatrix) where {L,T,S} end -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:GradientRegression,T,S} +function pred_leaf_cpu!( + pred, + n, + βˆ‘::Vector, + params::EvoTypes{L,T,S}, + K, + δ𝑀, + 𝑖, +) where {L<:GradientRegression,T,S} pred[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[2] + params.lambda * βˆ‘[3]) end -function pred_scalar_cpu!(βˆ‘::Vector{T}, params::EvoTypes, K) where {L<:GradientRegression,T,S} +function pred_scalar_cpu!( + βˆ‘::Vector{T}, + params::EvoTypes, + K, +) where {L<:GradientRegression,T,S} -params.eta * βˆ‘[1] / (βˆ‘[2] + params.lambda * βˆ‘[3]) end # prediction in Leaf - MLE2P -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:MLE2P,T,S} +function pred_leaf_cpu!( + pred, + n, + βˆ‘::Vector, + params::EvoTypes{L,T,S}, + K, + δ𝑀, + 𝑖, +) where {L<:MLE2P,T,S} pred[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]) pred[2, n] = -params.eta * βˆ‘[2] / (βˆ‘[4] + params.lambda * βˆ‘[5]) end @@ -104,14 +128,30 @@ function pred_scalar_cpu!(βˆ‘::Vector{T}, params::EvoTypes{L,T,S}, K) where {L<: end # prediction in Leaf - MultiClassRegression -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:MultiClassRegression,T,S} +function pred_leaf_cpu!( + pred, + n, + βˆ‘::Vector, + params::EvoTypes{L,T,S}, + K, + δ𝑀, + 𝑖, +) where {L<:MultiClassRegression,T,S} @inbounds for k = 1:K pred[k, n] = -params.eta * βˆ‘[k] / (βˆ‘[k+K] + params.lambda * βˆ‘[2*K+1]) end end # prediction in Leaf - QuantileRegression -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:QuantileRegression,T,S} +function pred_leaf_cpu!( + pred, + n, + βˆ‘::Vector, + params::EvoTypes{L,T,S}, + K, + δ𝑀, + 𝑖, +) where {L<:QuantileRegression,T,S} pred[1, n] = params.eta * quantile(δ𝑀[2, 𝑖], params.alpha) / (1 + params.lambda) # pred[1,n] = params.eta * quantile(view(δ𝑀, 2, 𝑖), params.alpha) / (1 + params.lambda) end @@ -120,7 +160,15 @@ end # end # prediction in Leaf - L1Regression -function pred_leaf_cpu!(pred, n, βˆ‘::Vector, params::EvoTypes{L,T,S}, K, δ𝑀, 𝑖) where {L<:L1Regression,T,S} +function pred_leaf_cpu!( + pred, + n, + βˆ‘::Vector, + params::EvoTypes{L,T,S}, + K, + δ𝑀, + 𝑖, +) where {L<:L1Regression,T,S} pred[1, n] = params.eta * βˆ‘[1] / (βˆ‘[3] * (1 + params.lambda)) end function pred_scalar_cpu!(βˆ‘::Vector, params::EvoTypes{L,T,S}, K) where {L<:L1Regression,T,S} diff --git a/src/structs.jl b/src/structs.jl index 64a22055..17752dd2 100644 --- a/src/structs.jl +++ b/src/structs.jl @@ -19,7 +19,8 @@ function TrainNode(nvars, nbins, K, T) [zeros(T, (2 * K + 1) * nbins) for j = 1:nvars], [zeros(T, (2 * K + 1) * nbins) for j = 1:nvars], [zeros(T, (2 * K + 1) * nbins) for j = 1:nvars], - zeros(T, nbins, nvars)) + zeros(T, nbins, nvars), + ) return node end @@ -40,7 +41,8 @@ function Tree{L,T}(x::Vector{T}) where {L,T} zeros(T, 1), zeros(T, 1), reshape(x, :, 1), - zeros(Bool, 1)) + zeros(Bool, 1), + ) end function Tree{L,T}(depth, K, ::T) where {L,T} @@ -50,7 +52,7 @@ function Tree{L,T}(depth, K, ::T) where {L,T} zeros(T, 2^depth - 1), zeros(T, 2^depth - 1), zeros(T, K, 2^depth - 1), - zeros(Bool, 2^depth - 1) + zeros(Bool, 2^depth - 1), ) end @@ -67,6 +69,6 @@ struct GBTree{L,T,S} params::EvoTypes metric::Metric K::Int - info + info::Any end -(m::GBTree)(x::AbstractMatrix) = 
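# functor sugar: a fitted GBTree is callable, so m(x) simply forwards to predict(m, x)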
predict(m, x)
\ No newline at end of file
+(m::GBTree)(x::AbstractMatrix) = predict(m, x)
diff --git a/test/MLJ.jl b/test/MLJ.jl
index 5692065a..3d28a3a8 100644
--- a/test/MLJ.jl
+++ b/test/MLJ.jl
@@ -15,18 +15,18 @@ X = MLJBase.table(X)

 # @load EvoTreeRegressor
 # linear regression
-tree_model = EvoTreeRegressor(max_depth=5, eta=0.05, nrounds=10)
+tree_model = EvoTreeRegressor(max_depth = 5, eta = 0.05, nrounds = 10)
 # logistic regression
-tree_model = EvoTreeRegressor(loss=:logistic, max_depth=5, eta=0.05, nrounds=10)
+tree_model = EvoTreeRegressor(loss = :logistic, max_depth = 5, eta = 0.05, nrounds = 10)
 # quantile regression
 # tree_model = EvoTreeRegressor(loss=:quantile, alpha=0.75, max_depth=5, eta=0.05, nrounds=10)

 mach = machine(tree_model, X, y)
-train, test = partition(eachindex(y), 0.7, shuffle=true); # 70:30 split
-fit!(mach, rows=train, verbosity=1)
+train, test = partition(eachindex(y), 0.7, shuffle = true); # 70:30 split
+fit!(mach, rows = train, verbosity = 1)

 mach.model.nrounds += 10
-fit!(mach, rows=train, verbosity=1)
+fit!(mach, rows = train, verbosity = 1)

 # predict on train data
 pred_train = predict(mach, selectrows(X, train))
@@ -64,15 +64,16 @@ mean(abs.(pred_test - selectrows(Y, test)))
 ##################################################
 X, y = @load_crabs

-tree_model = EvoTreeClassifier(max_depth=4, eta=0.05, lambda=0.0, gamma=0.0, nrounds=10)
+tree_model =
+    EvoTreeClassifier(max_depth = 4, eta = 0.05, lambda = 0.0, gamma = 0.0, nrounds = 10)

 # @load EvoTreeRegressor
 mach = machine(tree_model, X, y)
-train, test = partition(eachindex(y), 0.7, shuffle=true); # 70:30 split
-fit!(mach, rows=train, verbosity=1)
+train, test = partition(eachindex(y), 0.7, shuffle = true); # 70:30 split
+fit!(mach, rows = train, verbosity = 1)

 mach.model.nrounds += 50
-fit!(mach, rows=train, verbosity=1)
+fit!(mach, rows = train, verbosity = 1)

 pred_train = predict(mach, selectrows(X, train))
 pred_train_mode = predict_mode(mach, selectrows(X, train))
@@ -95,7 +96,7 @@ Y = rand(UInt8, size(X, 1))
 𝑖 = collect(1:size(X, 1))

 # train-eval split
-𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
+𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)

 train_size = 0.8
 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
@@ -105,22 +106,29 @@ Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]

 # @load EvoTreeRegressor
 tree_model = EvoTreeCount(
-    loss=:poisson, metric=:poisson,
-    nrounds=10,
-    lambda=0.0, gamma=0.0, eta=0.1,
-    max_depth=6, min_weight=1.0,
-    rowsample=0.5, colsample=0.5, nbins=32)
+    loss = :poisson,
+    metric = :poisson,
+    nrounds = 10,
+    lambda = 0.0,
+    gamma = 0.0,
+    eta = 0.1,
+    max_depth = 6,
+    min_weight = 1.0,
+    rowsample = 0.5,
+    colsample = 0.5,
+    nbins = 32,
+)

 X = MLJBase.table(X)
 X = MLJBase.matrix(X)

 # typeof(X)
 mach = machine(tree_model, X, Y)
-train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
-fit!(mach, rows=train, verbosity=1, force=true)
+train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
+fit!(mach, rows = train, verbosity = 1, force = true)

 mach.model.nrounds += 10
-fit!(mach, rows=train, verbosity=1)
+fit!(mach, rows = train, verbosity = 1)

 pred = predict(mach, selectrows(X, train))
 pred_mean = predict_mean(mach, selectrows(X, train))
@@ -136,7 +144,7 @@ Y = rand(size(X, 1))
 𝑖 = collect(1:size(X, 1))

 # train-eval split
-𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
+𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)

 train_size = 0.8
 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
@@ -146,20 +154,26 @@ Y_train, Y_eval = Y[𝑖_train], Y[𝑖_eval]

 # @load EvoTreeRegressor
 tree_model = EvoTreeGaussian(
-    nrounds=10,
-    lambda=0.0, gamma=0.0, eta=0.1,
-    max_depth=6, min_weight=1.0,
-    rowsample=0.5, colsample=0.5, nbins=32)
+    nrounds = 10,
+    lambda = 0.0,
+    gamma = 0.0,
+    eta = 0.1,
+    max_depth = 6,
+    min_weight = 1.0,
+    rowsample = 0.5,
+    colsample = 0.5,
+    nbins = 32,
+)

 X = MLJBase.table(X)

 # typeof(X)
 mach = machine(tree_model, X, Y)
-train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
-fit!(mach, rows=train, verbosity=1, force=true)
+train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
+fit!(mach, rows = train, verbosity = 1, force = true)

 mach.model.nrounds += 10
-fit!(mach, rows=train, verbosity=1)
+fit!(mach, rows = train, verbosity = 1)

 pred = predict(mach, selectrows(X, train))
 pred_mean = predict_mean(mach, selectrows(X, train))
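(Aside — a usage sketch, not part of the patch: the probabilistic machines above predict distribution objects, so summary statistics come straight from Distributions.jl. Assuming the fitted EvoTreeGaussian `mach` from the block above:)

using Distributions: mean, quantile

pred_dist = predict(mach, selectrows(X, train))  # a vector of Normal distributions
pred_avg = mean.(pred_dist)                      # matches predict_mean above
pred_q80 = quantile.(pred_dist, 0.8)             # elementwise 80% quantile
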
@@ -181,7 +195,7 @@ Y = rand(size(X, 1))
 𝑖 = collect(1:size(X, 1))

 # train-eval split
-𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
+𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)

 train_size = 0.8
 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
@@ -191,21 +205,27 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]

 # @load EvoTreeRegressor
 tree_model = EvoTreeMLE(
-    loss=:logistic,
-    nrounds=10,
-    lambda=1.0, gamma=0.0, eta=0.1,
-    max_depth=6, min_weight=32.0,
-    rowsample=0.5, colsample=0.5, nbins=32)
+    loss = :logistic,
+    nrounds = 10,
+    lambda = 1.0,
+    gamma = 0.0,
+    eta = 0.1,
+    max_depth = 6,
+    min_weight = 32.0,
+    rowsample = 0.5,
+    colsample = 0.5,
+    nbins = 32,
+)

 X = MLJBase.table(X)

 # typeof(X)
 mach = machine(tree_model, X, Y)
-train, test = partition(eachindex(Y), 0.8, shuffle=true); # 80:20 split
-fit!(mach, rows=train, verbosity=1, force=true)
+train, test = partition(eachindex(Y), 0.8, shuffle = true); # 80:20 split
+fit!(mach, rows = train, verbosity = 1, force = true)

 mach.model.nrounds += 10
-fit!(mach, rows=train, verbosity=1)
+fit!(mach, rows = train, verbosity = 1)

 pred = predict(mach, selectrows(X, train))
 pred_mean = predict_mean(mach, selectrows(X, train))
diff --git a/test/core.jl b/test/core.jl
index 68dc7ed7..5bb2c65b 100644
--- a/test/core.jl
+++ b/test/core.jl
@@ -13,7 +13,7 @@ Y = sigmoid(Y)
 𝑖 = collect(1:size(X, 1))

 # train-eval split
-𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false)
+𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false)

 train_size = 0.8
 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))]
 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end]
@@ -24,16 +24,31 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval]

 @testset "EvoTreeRegressor - Linear" begin
     # linear
     params1 = EvoTreeRegressor(
-        loss=:linear,
-        nrounds=100, nbins=100,
-        lambda=0.5, gamma=0.1, eta=0.05,
-        max_depth=6, min_weight=1.0,
-        rowsample=0.5, colsample=1.0, rng=123)
-
-    model, cache = EvoTrees.init_evotree(params1, x_train, y_train)
+        loss = :linear,
+        nrounds = 100,
+        nbins = 100,
+        lambda = 0.5,
+        gamma = 0.1,
+        eta = 0.05,
+        max_depth = 6,
+        min_weight = 1.0,
+        rowsample = 0.5,
+        colsample = 1.0,
+        rng = 123,
+    )
+
+    model, cache = EvoTrees.init_evotree(params1; x_train, y_train)
     preds_ini = EvoTrees.predict(model, x_eval)
     mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2)
-    model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mse, print_every_n=25)
+    model = fit_evotree(
+        params1;
+        x_train,
+        y_train,
+        x_eval,
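        # supplying the eval pair alongside `metric` activates tracked-metric logging,
        # reported every `print_every_n` rounds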
+ y_eval, + metric = :mse, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -43,16 +58,30 @@ end @testset "EvoTreeRegressor - Logistic" begin params1 = EvoTreeRegressor( - loss=:logistic, - nrounds=100, - lambda=0.5, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :logistic, + nrounds = 100, + lambda = 0.5, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:logloss, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :logloss, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -62,16 +91,30 @@ end @testset "EvoTreeRegressor - Gamma" begin params1 = EvoTreeRegressor( - loss=:gamma, - nrounds=100, - lambda=0.5, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :gamma, + nrounds = 100, + lambda = 0.5, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gamma, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :gamma, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -81,16 +124,30 @@ end @testset "EvoTreeRegressor - Tweedie" begin params1 = EvoTreeRegressor( - loss=:tweedie, - nrounds=100, - lambda=0.5, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :tweedie, + nrounds = 100, + lambda = 0.5, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:tweedie, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :tweedie, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -100,16 +157,32 @@ end @testset "EvoTreeRegressor - L1" begin params1 = EvoTreeRegressor( - loss=:L1, alpha=0.5, - nrounds=100, nbins=100, - lambda=0.5, gamma=0.0, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :L1, + alpha = 0.5, + nrounds = 100, + nbins = 100, + lambda = 0.5, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, 
y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:mae, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :mae, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -119,16 +192,32 @@ end @testset "EvoTreeRegressor - Quantile" begin params1 = EvoTreeRegressor( - loss=:quantile, alpha=0.5, - nrounds=100, nbins=100, - lambda=0.5, gamma=0.0, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :quantile, + alpha = 0.5, + nrounds = 100, + nbins = 100, + lambda = 0.5, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:quantile, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :quantile, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -138,16 +227,30 @@ end @testset "EvoTreeCount - Count" begin params1 = EvoTreeCount( - loss=:poisson, - nrounds=100, - lambda=0.5, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :poisson, + nrounds = 100, + lambda = 0.5, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval) mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:poisson, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :poisson, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval) mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -157,16 +260,31 @@ end @testset "EvoTreeMLE - Gaussian" begin params1 = EvoTreeMLE( - loss=:gaussian, - nrounds=100, nbins=100, - lambda=0.0, gamma=0.0, eta=0.05, - max_depth=6, min_weight=10.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :gaussian, + nrounds = 100, + nbins = 100, + lambda = 0.0, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 10.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval)[:, 1] mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:gaussian, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :gaussian, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval)[:, 1] mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -176,16 +294,31 @@ end @testset "EvoTreeMLE - Logistic" begin params1 = EvoTreeMLE( - loss=:logistic, - nrounds=100, nbins=100, - lambda=0.0, gamma=0.0, eta=0.05, - max_depth=6, min_weight=10.0, - rowsample=0.5, 
colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + loss = :logistic, + nrounds = 100, + nbins = 100, + lambda = 0.0, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 10.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval)[:, 1] mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, metric=:logistic, print_every_n=25) + model = fit_evotree( + params1; + x_train, + y_train, + x_eval, + y_eval, + metric = :logistic, + print_every_n = 25, + ) preds = EvoTrees.predict(model, x_eval)[:, 1] mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -195,15 +328,22 @@ end @testset "EvoTreeGaussian - Gaussian" begin params1 = EvoTreeGaussian( - nrounds=100, nbins=100, - lambda=0.0, gamma=0.0, eta=0.05, - max_depth=6, min_weight=10.0, - rowsample=0.5, colsample=1.0, rng=123) - - model, cache = EvoTrees.init_evotree(params1, x_train, y_train) + nrounds = 100, + nbins = 100, + lambda = 0.0, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 10.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) + + model, cache = EvoTrees.init_evotree(params1; x_train, y_train) preds_ini = EvoTrees.predict(model, x_eval)[:, 1] mse_error_ini = mean(abs.(preds_ini .- y_eval) .^ 2) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds = EvoTrees.predict(model, x_eval)[:, 1] mse_error = mean(abs.(preds .- y_eval) .^ 2) @@ -213,11 +353,18 @@ end @testset "EvoTrees - Feature Importance" begin params1 = EvoTreeRegressor( - loss=:linear, - nrounds=100, nbins=100, - lambda=0.5, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, rng=123) + loss = :linear, + nrounds = 100, + nbins = 100, + lambda = 0.5, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + ) model = fit_evotree(params1; x_train, y_train) features_gain = importance(model) diff --git a/test/gpu_base.jl b/test/gpu_base.jl index 22b98d5c..78c7c282 100644 --- a/test/gpu_base.jl +++ b/test/gpu_base.jl @@ -18,7 +18,7 @@ Y = sigmoid(Y) 𝑖 = collect(1:size(X, 1)) # train-eval split -𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) +𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false) train_size = 0.8 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] @@ -29,43 +29,85 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] ################################ # linear ################################ -params1 = EvoTreeRegressor(T=Float32, - loss=:linear, metric=:none, - nrounds=200, nbins=64, - lambda=0.5, gamma=0.1, eta=0.1, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0) +params1 = EvoTreeRegressor( + T = Float32, + loss = :linear, + metric = :none, + nrounds = 200, + nbins = 64, + lambda = 0.5, + gamma = 0.1, + eta = 0.1, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, +) @time model = fit_evotree_gpu(params1; x_train, y_train); @time pred_train_linear = predict_gpu(model, X_train) x_perm = sortperm(X_train[:, 1]) -plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") -plot!(X_train[:, 1][x_perm], 
pred_train_linear[x_perm], color="navy", linewidth=1.5, label="Linear") +plot( + X_train, + Y_train, + msize = 1, + mcolor = "gray", + mswidth = 0, + background_color = RGB(1, 1, 1), + seriestype = :scatter, + xaxis = ("feature"), + yaxis = ("target"), + legend = true, + label = "", +) +plot!( + X_train[:, 1][x_perm], + pred_train_linear[x_perm], + color = "navy", + linewidth = 1.5, + label = "Linear", +) # savefig("figures/regression_sinus_gpu.png") -params1 = EvoTreeRegressor(T=Float32, - loss=:linear, metric=:mse, - nrounds=200, nbins=64, - lambda=0.5, gamma=0.1, eta=0.1, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, - device="gpu") +params1 = EvoTreeRegressor( + T = Float32, + loss = :linear, + metric = :mse, + nrounds = 200, + nbins = 64, + lambda = 0.5, + gamma = 0.1, + eta = 0.1, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + device = "gpu", +) -@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n=25); -@time model = fit_evotree_gpu(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n = 25); +@time model = fit_evotree_gpu(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25); @time pred_train_linear = predict_gpu(model, x_train) ################################ # Logistic ################################ -params1 = EvoTreeRegressor(T=Float32, - loss=:logistic, metric=:logloss, - nrounds=200, nbins=64, - lambda=0.5, gamma=0.1, eta=0.1, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, - device="gpu") +params1 = EvoTreeRegressor( + T = Float32, + loss = :logistic, + metric = :logloss, + nrounds = 200, + nbins = 64, + lambda = 0.5, + gamma = 0.1, + eta = 0.1, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + device = "gpu", +) @time model = fit_evotree_gpu(params1; x_train, y_train); @time pred_train_linear = predict_gpu(model, X_train) @@ -73,18 +115,30 @@ params1 = EvoTreeRegressor(T=Float32, ################################ # Gaussian ################################ -params1 = EvoTreeGaussian(T=Float64, - loss=:gaussian, metric=:gaussian, - nrounds=200, nbins=64, - lambda=1.0, gamma=0.1, eta=0.1, - max_depth=5, min_weight=100.0, - rowsample=0.5, colsample=1.0, rng=123, - device="gpu") +params1 = EvoTreeGaussian( + T = Float64, + loss = :gaussian, + metric = :gaussian, + nrounds = 200, + nbins = 64, + lambda = 1.0, + gamma = 0.1, + eta = 0.1, + max_depth = 5, + min_weight = 100.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, + device = "gpu", +) -@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n=25); +@time model = fit_evotree_gpu(params1; x_train, y_train, print_every_n = 25); @time pred_train_gauss = predict_gpu(model, x_train) -pred_gauss = [Distributions.Normal(pred_train_gauss[i, 1], pred_train_gauss[i, 2]) for i in axes(pred_train_gauss, 1)] +pred_gauss = [ + Distributions.Normal(pred_train_gauss[i, 1], pred_train_gauss[i, 2]) for + i in axes(pred_train_gauss, 1) +] pred_q20 = quantile.(pred_gauss, 0.2) pred_q80 = quantile.(pred_gauss, 0.8) diff --git a/test/monotonic.jl b/test/monotonic.jl index 6233afb4..4acfde0d 100644 --- a/test/monotonic.jl +++ b/test/monotonic.jl @@ -15,7 +15,7 @@ seed = 123 # train-eval split - 𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) + 𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false) train_size = 0.8 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] @@ -28,27 
+28,43 @@ ###################################### # benchmark params1 = EvoTreeRegressor( - device="cpu", - loss=:linear, metric=:mse, - nrounds=200, nbins=32, - lambda=1.0, gamma=0.0, eta=0.05, - max_depth=6, min_weight=0.0, - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + loss = :linear, + metric = :mse, + nrounds = 200, + nbins = 32, + lambda = 1.0, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 0.0, + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds_ref = predict(model, x_train) # monotonic constraint params1 = EvoTreeRegressor( - device="cpu", - loss=:linear, metric=:mse, - nrounds=200, nbins=32, - lambda=1.0, gamma=0.0, eta=0.5, - max_depth=6, min_weight=0.0, - monotone_constraints=Dict(1 => 1), - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + loss = :linear, + metric = :mse, + nrounds = 200, + nbins = 32, + lambda = 1.0, + gamma = 0.0, + eta = 0.5, + max_depth = 6, + min_weight = 0.0, + monotone_constraints = Dict(1 => 1), + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds_mono = predict(model, x_train) # using Plots @@ -100,27 +116,43 @@ ###################################### # benchmark params1 = EvoTreeRegressor( - device="cpu", - loss=:logistic, metric=:logloss, - nrounds=200, nbins=32, - lambda=0.05, gamma=0.0, eta=0.05, - max_depth=6, min_weight=0.0, - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + loss = :logistic, + metric = :logloss, + nrounds = 200, + nbins = 32, + lambda = 0.05, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 0.0, + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds_ref = predict(model, x_train) # monotonic constraint params1 = EvoTreeRegressor( - device="cpu", - loss=:logistic, metric=:logloss, - nrounds=200, nbins=32, - lambda=0.05, gamma=0.0, eta=0.05, - max_depth=6, min_weight=0.0, - monotone_constraints=Dict(1 => 1), - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + loss = :logistic, + metric = :logloss, + nrounds = 200, + nbins = 32, + lambda = 0.05, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 0.0, + monotone_constraints = Dict(1 => 1), + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds_mono = predict(model, x_train) # using Plots @@ -172,27 +204,41 @@ ###################################### # linear - benchmark params1 = EvoTreeGaussian( - device="cpu", - metric=:gaussian, - nrounds=200, nbins=32, - lambda=1.0, gamma=0.0, eta=0.05, - max_depth=6, min_weight=0.0, - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + metric = :gaussian, + nrounds = 200, + nbins = 32, + lambda = 1.0, + gamma = 0.0, + eta = 0.05, + max_depth = 6, + min_weight = 0.0, + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, 
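    # x_eval / y_eval feed the :gaussian metric that `print_every_n = 25` logs during fitting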
x_eval, y_eval, print_every_n = 25) preds_ref = predict(model, x_train) # monotonic constraint params1 = EvoTreeGaussian( - device="cpu", - metric=:gaussian, - nrounds=200, nbins=32, - lambda=1.0, gamma=0.0, eta=0.5, - max_depth=6, min_weight=0.0, - monotone_constraints=Dict(1 => 1), - rowsample=0.5, colsample=1.0, rng=seed) - - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + device = "cpu", + metric = :gaussian, + nrounds = 200, + nbins = 32, + lambda = 1.0, + gamma = 0.0, + eta = 0.5, + max_depth = 6, + min_weight = 0.0, + monotone_constraints = Dict(1 => 1), + rowsample = 0.5, + colsample = 1.0, + rng = seed, + ) + + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) preds_mono = predict(model, x_train) # using Plots diff --git a/test/plot.jl b/test/plot.jl index 2e8a598f..c182bd54 100644 --- a/test/plot.jl +++ b/test/plot.jl @@ -8,7 +8,7 @@ using EvoTrees # @load "data/model_gaussian_5.bson" model model = EvoTrees.load("data/model_linear_4.bson"); -var_names = ["var_$i" for i in 1:100] +var_names = ["var_$i" for i = 1:100] plot(model) plot(model, 2) plot(model, 3, var_names) @@ -17,15 +17,15 @@ plot(model.trees[2], var_names) typeof(tree_layout[1]) BezierCurve(tree_layout[1]) -mutable struct BCurve{T <: GeometryBasics.Point} +mutable struct BCurve{T<:GeometryBasics.Point} control_points::Vector{T} end function (bc::BCurve)(t::Real) p = zero(P2) n = length(bc.control_points) - 1 - for i in 0:n - p += bc.control_points[i + 1] * binomial(n, i) * (1 - t)^(n - i) * t^i + for i = 0:n + p += bc.control_points[i+1] * binomial(n, i) * (1 - t)^(n - i) * t^i end p end diff --git a/test/save_load.jl b/test/save_load.jl index 7b61af18..c6d8ca11 100644 --- a/test/save_load.jl +++ b/test/save_load.jl @@ -16,7 +16,7 @@ Y = sigmoid(Y) 𝑖 = collect(1:size(X, 1)) # train-eval split -𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) +𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false) train_size = 0.8 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] @@ -25,37 +25,45 @@ x_train, x_eval = X[𝑖_train, :], X[𝑖_eval, :] y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] # linear -params1 = EvoTreeRegressor(T=Float64, - loss=:linear, metric=:mse, - nrounds=200, nbins=64, - lambda=0.1, gamma=0.1, eta=0.05, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=1.0, - rng=123) - -m = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); +params1 = EvoTreeRegressor( + T = Float64, + loss = :linear, + metric = :mse, + nrounds = 200, + nbins = 64, + lambda = 0.1, + gamma = 0.1, + eta = 0.05, + max_depth = 6, + min_weight = 1.0, + rowsample = 0.5, + colsample = 1.0, + rng = 123, +) + +m = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25); p = m(x_eval) # serialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat"), m); # serialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat"), p); -m_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v172.dat")); -p_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v172.dat")); -pm_172 = m_172(x_eval) +# m_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v172.dat")); +# p_172 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v172.dat")); +# pm_172 = m_172(x_eval) -m_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v180.dat")); -p_180 = deserialize(joinpath(@__DIR__, "..", "data", 
"save-load-test-p-v180.dat")); -pm_180 = m_180(x_eval) +# m_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v180.dat")); +# p_180 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v180.dat")); +# pm_180 = m_180(x_eval) -m_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat")); -p_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat")); -pm_182 = m_182(x_eval) +# m_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-m-v182.dat")); +# p_182 = deserialize(joinpath(@__DIR__, "..", "data", "save-load-test-p-v182.dat")); +# pm_182 = m_182(x_eval) -@assert all(p .== p_172) -@assert all(p .== pm_172) -@assert all(p .== p_180) -@assert all(p .== pm_180) -@assert all(p .== p_182) -@assert all(p .== pm_182) +# @assert all(p .== p_172) +# @assert all(p .== pm_172) +# @assert all(p .== p_180) +# @assert all(p .== pm_180) +# @assert all(p .== p_182) +# @assert all(p .== pm_182) -@info "test successful! πŸš€" \ No newline at end of file +# @info "test successful! πŸš€" \ No newline at end of file From a4f7be00737300ee5d3e9d9f367e9d69bd4127fe Mon Sep 17 00:00:00 2001 From: jeremie Date: Mon, 17 Oct 2022 22:21:19 -0400 Subject: [PATCH 09/11] up --- experiments/random.jl | 4 ++-- experiments/speed_cpu_gpu.jl | 27 ++++++++++++++++----------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/experiments/random.jl b/experiments/random.jl index c924e37e..e02f333e 100644 --- a/experiments/random.jl +++ b/experiments/random.jl @@ -34,12 +34,12 @@ params1 = EvoTreeRegressor(T=Float32, # asus laptopt: for 1.25e6 no eval: 9.650007 seconds (893.53 k allocations: 2.391 GiB, 5.52% gc time) @time model = fit_evotree(params1; x_train, y_train); @time model = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, print_every_n=10); -@btime model = fit_evotree($params1; $x_train, $y_train); +@btime model = fit_evotree(params1; x_train, y_train); @time pred_train = predict(model, x_train); @btime pred_train = predict(model, x_train); gain = importance(model) -@time model, cache = EvoTrees.init_evotree(params1, x_train, y_train); +@time model, cache = EvoTrees.init_evotree(params1; x_train, y_train); @time EvoTrees.grow_evotree!(model, cache); ############################# diff --git a/experiments/speed_cpu_gpu.jl b/experiments/speed_cpu_gpu.jl index 27afbbc9..3bbf032b 100644 --- a/experiments/speed_cpu_gpu.jl +++ b/experiments/speed_cpu_gpu.jl @@ -27,16 +27,16 @@ y_train, y_eval = Y[𝑖_train], Y[𝑖_eval] params_c = EvoTreeRegressor(T=Float32, loss=:linear, nrounds=100, - lambda=1.0, gamma=0.0, eta=0.1, + lambda=0.1, gamma=0.0, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=0.5, nbins=64); -params_c = EvoTrees.EvoTreeLogistic(T=Float32, - loss=:linear, - nrounds=100, - lambda=1.0, gamma=0.0, eta=0.1, - max_depth=6, min_weight=1.0, - rowsample=0.5, colsample=0.5, nbins=64); +# params_c = EvoTrees.EvoTreeLogistic(T=Float32, +# loss=:linear, +# nrounds=100, +# lambda=1.0, gamma=0.0, eta=0.1, +# max_depth=6, min_weight=1.0, +# rowsample=0.5, colsample=0.5, nbins=64); # params_c = EvoTreeGaussian(T=Float32, # loss=:gaussian, metric=:none, @@ -45,11 +45,11 @@ params_c = EvoTrees.EvoTreeLogistic(T=Float32, # max_depth=6, min_weight=1.0, # rowsample=0.5, colsample=0.5, nbins=64); -model_c, cache_c = EvoTrees.init_evotree(params_c, x_train, y_train); +model_c, cache_c = EvoTrees.init_evotree(params_c; x_train, y_train); # initialize from cache params_c = model_c.params -X_size = 
size(cache_c.X_bin) +X_size = size(cache_c.x_bin) # select random rows and cols sample!(params_c.rng, cache_c.𝑖_, cache_c.nodes[1].𝑖, replace=false, ordered=true); @@ -60,8 +60,11 @@ sample!(params_c.rng, cache_c.𝑗_, cache_c.𝑗, replace=false, ordered=true); 𝑖 = cache_c.nodes[1].𝑖 𝑗 = cache_c.𝑗 +L = EvoTrees.Linear +T = Float32 # build a new tree # 897.800 ΞΌs (6 allocations: 736 bytes) +@time EvoTrees.update_grads!(L, cache_c.δ𝑀, cache_c.pred, cache_c.y; alpha = params_c.alpha) # @btime EvoTrees.update_grads!($params_c.loss, $cache_c.δ𝑀, $cache_c.pred_cpu, $cache_c.Y_cpu, $params_c.Ξ±) # βˆ‘ = vec(sum(cache_c.Ξ΄[𝑖,:], dims=1)) # gain = EvoTrees.get_gain(params_c.loss, βˆ‘, params_c.Ξ») @@ -70,8 +73,10 @@ sample!(params_c.rng, cache_c.𝑗_, cache_c.𝑗, replace=false, ordered=true); # 62.530 ms (7229 allocations: 17.43 MiB) # 1.25e5: 9.187 ms (7358 allocations: 2.46 MiB) -tree = EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.lambda))) -@time EvoTrees.grow_tree!(tree, cache_c.nodes, params_c, cache_c.δ𝑀, cache_c.edges, cache_c.𝑗, cache_c.left, cache_c.left, cache_c.right, cache_c.X_bin, cache_c.K) +tree = EvoTrees.Tree{L,T}(params_c.max_depth, model_c.K, zero(typeof(params_c.lambda))) +@time EvoTrees.grow_tree!(tree, cache_c.nodes, params_c, cache_c.δ𝑀, cache_c.edges, cache_c.𝑗, cache_c.left, cache_c.left, cache_c.right, cache_c.x_bin, cache_c.K, cache_c.monotone_constraints) +@code_warntype EvoTrees.grow_tree!(tree, cache_c.nodes, params_c, cache_c.δ𝑀, cache_c.edges, cache_c.𝑗, cache_c.left, cache_c.left, cache_c.right, cache_c.x_bin, cache_c.K, cache_c.monotone_constraints) + @btime EvoTrees.grow_tree!($EvoTrees.Tree(params_c.max_depth, model_c.K, zero(typeof(params_c.Ξ»))), $cache_c.nodes, $params_c, $cache_c.δ𝑀, $cache_c.edges, $cache_c.𝑗, $cache_c.left, $cache_c.left, $cache_c.right, $cache_c.X_bin, $cache_c.K) @time EvoTrees.grow_tree!(EvoTrees.Tree(params_c.max_depth, model_c.K, params_c.Ξ»), params_c, cache_c.Ξ΄, cache_c.hist, cache_c.histL, cache_c.histR, cache_c.gains, cache_c.edges, 𝑖, 𝑗, 𝑛, cache_c.X_bin); From e35280883db5a4019877d12a8632b169abe1d6c4 Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Tue, 18 Oct 2022 01:22:13 -0400 Subject: [PATCH 10/11] up --- experiments/benchmarks_v2.jl | 45 +++---- experiments/random.jl | 10 +- src/find_split.jl | 12 +- src/fit.jl | 2 +- src/gpu/find_split_gpu.jl | 12 +- src/gpu/fit_gpu.jl | 2 +- src/predict.jl | 6 +- test/monotonic.jl | 221 +++++++++++++++++------------------ 8 files changed, 155 insertions(+), 155 deletions(-) diff --git a/experiments/benchmarks_v2.jl b/experiments/benchmarks_v2.jl index 5a20465e..79a74c33 100644 --- a/experiments/benchmarks_v2.jl +++ b/experiments/benchmarks_v2.jl @@ -58,31 +58,32 @@ num_feat = Int(100) x_train = rand(nobs, num_feat) y_train = rand(size(x_train, 1)) -@info "xgboost train:" -@time m_xgb = xgboost(x_train, nrounds, label=y_train, param=params_xgb, metrics=metrics, nthread=nthread, silent=1); -@btime xgboost($x_train, $nrounds, label=$y_train, param=$params_xgb, metrics=$metrics, nthread=$nthread, silent=1); -@info "xgboost predict:" -@time pred_xgb = XGBoost.predict(m_xgb, x_train); -@btime XGBoost.predict($m_xgb, $x_train); +# @info "xgboost train:" +# @time m_xgb = xgboost(x_train, nrounds, label=y_train, param=params_xgb, metrics=metrics, nthread=nthread, silent=1); +# @btime xgboost($x_train, $nrounds, label=$y_train, param=$params_xgb, metrics=$metrics, nthread=$nthread, silent=1); +# @info "xgboost predict:" +# @time pred_xgb = XGBoost.predict(m_xgb, 
x_train); +# @btime XGBoost.predict($m_xgb, $x_train); -@info "evotrees train CPU:" -params_evo.device = "cpu" -@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=50); -@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); -@info "evotrees predict CPU:" -@time pred_evo = EvoTrees.predict(m_evo, x_train); -@btime EvoTrees.predict($m_evo, $x_train); +# @info "evotrees train CPU:" +# params_evo.device = "cpu" +# @time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=100); +# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); +# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train); +# @info "evotrees predict CPU:" +# @time pred_evo = EvoTrees.predict(m_evo, x_train); +# @btime EvoTrees.predict($m_evo, $x_train); CUDA.allowscalar(true) @info "evotrees train GPU:" params_evo.device = "gpu" @time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); -@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=50); -@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); -@info "evotrees predict GPU:" -@time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); -@btime EvoTrees.predict($m_evo_gpu, $x_train); - -# w_train = ones(length(y_train)) -# @time m_evo_gpu = fit_evotree(params_evo, x_train, y_train); -# @time m_evo_gpu = fit_evotree(params_evo, x_train, y_train, w_train); \ No newline at end of file +@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); +@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); +@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); +@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=100); +# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); +# @info "evotrees predict GPU:" +# @time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); +# @btime EvoTrees.predict($m_evo_gpu, $x_train); \ No newline at end of file diff --git a/experiments/random.jl b/experiments/random.jl index e02f333e..63e93c49 100644 --- a/experiments/random.jl +++ b/experiments/random.jl @@ -33,7 +33,7 @@ params1 = EvoTreeRegressor(T=Float32, # asus laptopt: for 1.25e6 no eval: 9.650007 seconds (893.53 k allocations: 2.391 GiB, 5.52% gc time) @time model = fit_evotree(params1; x_train, y_train); -@time model = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, print_every_n=10); +@time model = fit_evotree(params1; x_train, y_train, metric=:mse, x_eval, y_eval, print_every_n=100); @btime model = fit_evotree(params1; x_train, y_train); @time pred_train = predict(model, x_train); @btime pred_train = predict(model, x_train); @@ -77,7 +77,7 @@ params1 = EvoTreeGaussian(T=Float32, # train model params1 = EvoTreeRegressor(T=Float32, loss=:linear, metric=:mse, - nrounds=10, + nrounds=100, lambda=1.0, gamma=0, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=0.5, nbins=64, @@ -86,7 +86,7 @@ params1 = EvoTreeRegressor(T=Float32, # Asus laptop: 10.015568 seconds (13.80 M allocations: 1.844 GiB, 4.00% gc time) @time model = EvoTrees.fit_evotree(params1; 
x_train, y_train); @btime model = EvoTrees.fit_evotree(params1; x_train, y_train); -@time model, cache = EvoTrees.init_evotree_gpu(params1, X_train, Y_train); +@time model, cache = EvoTrees.init_evotree_gpu(params1; x_train, y_train); @time EvoTrees.grow_evotree!(model, cache); using MLJBase @@ -118,14 +118,14 @@ params1 = EvoTreeRegressor(T=Float32, # GPU - Gaussian ################################ params1 = EvoTreeGaussian(T=Float32, - loss=:gaussian, metric=:gaussian, + loss=:gaussian, nrounds=100, lambda=1.0, gamma=0, eta=0.1, max_depth=6, min_weight=1.0, rowsample=0.5, colsample=0.5, nbins=32, device="gpu") # Asus laptop: 14.304369 seconds (24.81 M allocations: 2.011 GiB, 1.90% gc time) -@time model = EvoTrees.fit_evotree(params1, X_train, Y_train); +@time model = EvoTrees.fit_evotree(params1; x_train, y_train); # Auss laptop: 1.888472 seconds (8.40 k allocations: 1.613 GiB, 14.86% gc time) @time model, cache = EvoTrees.init_evotree(params1, X_train, Y_train); diff --git a/src/find_split.jl b/src/find_split.jl index 907d5290..7e233a2c 100644 --- a/src/find_split.jl +++ b/src/find_split.jl @@ -298,8 +298,10 @@ function hist_gains_cpu!( if bin == params.nbins gains[bin] = hL[i]^2 / (hL[i+1] + params.lambda * hL[i+2]) / 2 elseif hL[i+2] > params.min_weight && hR[i+2] > params.min_weight - predL = pred_scalar_cpu!(hL[i:i+2], params, K) - predR = pred_scalar_cpu!(hR[i:i+2], params, K) + if monotone_constraint != 0 + predL = pred_scalar_cpu!(view(hL, i:i+2), params, K) + predR = pred_scalar_cpu!(view(hR, i:i+2), params, K) + end if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) @@ -362,8 +364,10 @@ function hist_gains_cpu!( hL[i+1]^2 / (hL[i+3] + params.lambda * hL[i+4]) ) / 2 elseif hL[i+4] > params.min_weight && hR[i+4] > params.min_weight - predL = pred_scalar_cpu!(hL[i:i+4], params, K) - predR = pred_scalar_cpu!(hR[i:i+4], params, K) + if monotone_constraint != 0 + predL = pred_scalar_cpu!(view(hL, i:i+4), params, K) + predR = pred_scalar_cpu!(view(hR, i:i+4), params, K) + end if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) diff --git a/src/fit.jl b/src/fit.jl index fe7ff45a..abbe9ee2 100644 --- a/src/fit.jl +++ b/src/fit.jl @@ -94,7 +94,7 @@ function init_evotree( # assign monotone contraints in constraints vector monotone_constraints = zeros(Int32, x_size[2]) - hasproperty(params, :monotone_constraint) && for (k, v) in params.monotone_constraints + hasproperty(params, :monotone_constraints) && for (k, v) in params.monotone_constraints monotone_constraints[k] = v end diff --git a/src/gpu/find_split_gpu.jl b/src/gpu/find_split_gpu.jl index b6a29678..833d8749 100644 --- a/src/gpu/find_split_gpu.jl +++ b/src/gpu/find_split_gpu.jl @@ -237,8 +237,10 @@ function hist_gains_gpu_kernel!(gains::CuDeviceMatrix{T}, hL::CuDeviceArray{T,3} if i == nbins gains[i, j] = hL[1, i, j]^2 / (hL[2, i, j] + lambda * hL[3, i, j]) / 2 elseif hL[3, i, j] > min_weight && hR[3, i, j] > min_weight - predL = -hL[1, i, j] / (hL[2, i, j] + lambda * hL[3, i, j]) - predR = -hR[1, i, j] / (hR[2, i, j] + lambda * hR[3, i, j]) + if monotone_constraint != 0 + predL = -hL[1, i, j] / (hL[2, i, j] + lambda * hL[3, i, j]) + predR = -hR[1, i, j] / (hR[2, i, j] + lambda * hR[3, i, j]) + end if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) @@ -281,8 +283,10 @@ function 
hist_gains_gpu_kernel_gauss!(gains::CuDeviceMatrix{T}, hL::CuDeviceArra if i == nbins gains[i, j] = (hL[1, i, j]^2 / (hL[3, i, j] + lambda * hL[5, i, j]) + hL[2, i, j]^2 / (hL[4, i, j] + lambda * hL[5, i, j])) / 2 elseif hL[5, i, j] > min_weight && hR[5, i, j] > min_weight - predL = -hL[1, i, j] / (hL[3, i, j] + lambda * hL[5, i, j]) - predR = -hR[1, i, j] / (hR[3, i, j] + lambda * hR[5, i, j]) + if monotone_constraint != 0 + predL = -hL[1, i, j] / (hL[3, i, j] + lambda * hL[5, i, j]) + predR = -hR[1, i, j] / (hR[3, i, j] + lambda * hR[5, i, j]) + end if (monotone_constraint == 0) || (monotone_constraint == -1 && predL > predR) || (monotone_constraint == 1 && predL < predR) diff --git a/src/gpu/fit_gpu.jl b/src/gpu/fit_gpu.jl index 7dbb2f6f..d26cb3ed 100644 --- a/src/gpu/fit_gpu.jl +++ b/src/gpu/fit_gpu.jl @@ -68,7 +68,7 @@ function init_evotree_gpu( # assign monotone contraints in constraints vector monotone_constraints = zeros(Int32, x_size[2]) - hasproperty(params, :monotone_constraint) && for (k, v) in params.monotone_constraints + hasproperty(params, :monotone_constraints) && for (k, v) in params.monotone_constraints monotone_constraints[k] = v end diff --git a/src/predict.jl b/src/predict.jl index f6d3c64e..8f384799 100644 --- a/src/predict.jl +++ b/src/predict.jl @@ -103,7 +103,7 @@ function pred_leaf_cpu!( pred[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[2] + params.lambda * βˆ‘[3]) end function pred_scalar_cpu!( - βˆ‘::Vector{T}, + βˆ‘::AbstractVector{T}, params::EvoTypes, K, ) where {L<:GradientRegression,T,S} @@ -123,7 +123,7 @@ function pred_leaf_cpu!( pred[1, n] = -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]) pred[2, n] = -params.eta * βˆ‘[2] / (βˆ‘[4] + params.lambda * βˆ‘[5]) end -function pred_scalar_cpu!(βˆ‘::Vector{T}, params::EvoTypes{L,T,S}, K) where {L<:MLE2P,T,S} +function pred_scalar_cpu!(βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}, K) where {L<:MLE2P,T,S} -params.eta * βˆ‘[1] / (βˆ‘[3] + params.lambda * βˆ‘[5]) end @@ -171,6 +171,6 @@ function pred_leaf_cpu!( ) where {L<:L1Regression,T,S} pred[1, n] = params.eta * βˆ‘[1] / (βˆ‘[3] * (1 + params.lambda)) end -function pred_scalar_cpu!(βˆ‘::Vector, params::EvoTypes{L,T,S}, K) where {L<:L1Regression,T,S} +function pred_scalar_cpu!(βˆ‘::AbstractVector{T}, params::EvoTypes{L,T,S}, K) where {L<:L1Regression,T,S} params.eta * βˆ‘[1] / (βˆ‘[3] * (1 + params.lambda)) end \ No newline at end of file diff --git a/test/monotonic.jl b/test/monotonic.jl index 4acfde0d..873cd4f8 100644 --- a/test/monotonic.jl +++ b/test/monotonic.jl @@ -15,7 +15,7 @@ seed = 123 # train-eval split - 𝑖_sample = sample(𝑖, size(𝑖, 1), replace = false) + 𝑖_sample = sample(𝑖, size(𝑖, 1), replace=false) train_size = 0.8 𝑖_train = 𝑖_sample[1:floor(Int, train_size * size(𝑖, 1))] 𝑖_eval = 𝑖_sample[floor(Int, train_size * size(𝑖, 1))+1:end] @@ -28,51 +28,48 @@ ###################################### # benchmark params1 = EvoTreeRegressor( - device = "cpu", - loss = :linear, - metric = :mse, - nrounds = 200, - nbins = 32, - lambda = 1.0, - gamma = 0.0, - eta = 0.05, - max_depth = 6, - min_weight = 0.0, - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + loss=:linear, + nrounds=20, + nbins=32, + lambda=1.0, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=0.0, + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_ref = predict(model, x_train) # monotonic constraint 
params1 = EvoTreeRegressor( - device = "cpu", - loss = :linear, - metric = :mse, - nrounds = 200, - nbins = 32, - lambda = 1.0, - gamma = 0.0, - eta = 0.5, - max_depth = 6, - min_weight = 0.0, - monotone_constraints = Dict(1 => 1), - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + loss=:linear, + nrounds=20, + nbins=32, + lambda=1.0, + gamma=0.0, + eta=0.5, + max_depth=6, + min_weight=0.0, + monotone_constraints=Dict(1 => 1), + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_mono = predict(model, x_train) # using Plots - # using Colors - # x_perm = sortperm(X_train[:, 1]) - # plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") - # plot!(X_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") - # plot!(X_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") + # x_perm = sortperm(x_train[:, 1]) + # plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") + # plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") + # plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") ###################################### @@ -116,51 +113,49 @@ ###################################### # benchmark params1 = EvoTreeRegressor( - device = "cpu", - loss = :logistic, - metric = :logloss, - nrounds = 200, - nbins = 32, - lambda = 0.05, - gamma = 0.0, - eta = 0.05, - max_depth = 6, - min_weight = 0.0, - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + loss=:logistic, + metric=:logloss, + nrounds=200, + nbins=32, + lambda=0.05, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=0.0, + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_ref = predict(model, x_train) # monotonic constraint params1 = EvoTreeRegressor( - device = "cpu", - loss = :logistic, - metric = :logloss, - nrounds = 200, - nbins = 32, - lambda = 0.05, - gamma = 0.0, - eta = 0.05, - max_depth = 6, - min_weight = 0.0, - monotone_constraints = Dict(1 => 1), - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + loss=:logistic, + metric=:logloss, + nrounds=200, + nbins=32, + lambda=0.05, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=0.0, + monotone_constraints=Dict(1 => 1), + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_mono = predict(model, x_train) - # using Plots - # using Colors - # x_perm = sortperm(X_train[:, 1]) - # plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") - # plot!(X_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") - # plot!(X_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") + # x_perm = 
sortperm(x_train[:, 1]) + # plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") + # plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") + # plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") ###################################### @@ -202,57 +197,55 @@ ###################################### ### Gaussian - CPU ###################################### - # linear - benchmark + # benchmark params1 = EvoTreeGaussian( - device = "cpu", - metric = :gaussian, - nrounds = 200, - nbins = 32, - lambda = 1.0, - gamma = 0.0, - eta = 0.05, - max_depth = 6, - min_weight = 0.0, - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + metric=:gaussian, + nrounds=200, + nbins=32, + lambda=1.0, + gamma=0.0, + eta=0.05, + max_depth=6, + min_weight=0.0, + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_ref = predict(model, x_train) # monotonic constraint params1 = EvoTreeGaussian( - device = "cpu", - metric = :gaussian, - nrounds = 200, - nbins = 32, - lambda = 1.0, - gamma = 0.0, - eta = 0.5, - max_depth = 6, - min_weight = 0.0, - monotone_constraints = Dict(1 => 1), - rowsample = 0.5, - colsample = 1.0, - rng = seed, + device="cpu", + metric=:gaussian, + nrounds=200, + nbins=32, + lambda=1.0, + gamma=0.0, + eta=0.5, + max_depth=6, + min_weight=0.0, + monotone_constraints=Dict(1 => 1), + rowsample=0.5, + colsample=1.0, + rng=seed, ) - model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n = 25) + model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) preds_mono = predict(model, x_train) - # using Plots - # using Colors - # x_perm = sortperm(X_train[:, 1]) - # plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") - # plot!(X_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") - # plot!(X_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") + # x_perm = sortperm(x_train[:, 1]) + # plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") + # plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") + # plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") ###################################### ### Gaussian - GPU ###################################### - # linear - benchmark + # benchmark # params1 = EvoTreeGaussian( # device="gpu", # metric=:gaussian, @@ -261,8 +254,8 @@ # max_depth=6, min_weight=0.0, # rowsample=0.5, colsample=1.0, rng=seed) - # model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25); - # preds_ref = predict(model, x_train); + # model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + # preds_ref = predict(model, x_train) # # monotonic constraint # params1 = EvoTreeGaussian( @@ -274,14 +267,12 @@ # monotone_constraints=Dict(1 => 1), # rowsample=0.5, colsample=1.0, rng=seed) - # model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, 
print_every_n=25); - # preds_mono = predict(model, x_train); + # model = fit_evotree(params1; x_train, y_train, x_eval, y_eval, print_every_n=25) + # preds_mono = predict(model, x_train) - # using Plots - # using Colors - # x_perm = sortperm(X_train[:, 1]) - # plot(X_train, Y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="") - # plot!(X_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") - # plot!(X_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") + # x_perm = sortperm(x_train[:, 1]) + # plot(x_train, y_train, msize=1, mcolor="gray", mswidth=0, background_color=RGB(1, 1, 1), seriestype=:scatter, xaxis=("feature"), yaxis=("target"), legend=true, label="GPU Gauss") + # plot!(x_train[:, 1][x_perm], preds_ref[x_perm], color="navy", linewidth=1.5, label="Reference") + # plot!(x_train[:, 1][x_perm], preds_mono[x_perm], color="red", linewidth=1.5, label="Monotonic") end \ No newline at end of file From a3c2d78934faf40ec5b9d2d2688b35eba6b18e79 Mon Sep 17 00:00:00 2001 From: "jeremie.desgagne.bouchard" Date: Tue, 18 Oct 2022 01:22:52 -0400 Subject: [PATCH 11/11] up --- experiments/benchmarks_v2.jl | 40 ++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/experiments/benchmarks_v2.jl b/experiments/benchmarks_v2.jl index 79a74c33..14f66020 100644 --- a/experiments/benchmarks_v2.jl +++ b/experiments/benchmarks_v2.jl @@ -58,32 +58,28 @@ num_feat = Int(100) x_train = rand(nobs, num_feat) y_train = rand(size(x_train, 1)) -# @info "xgboost train:" -# @time m_xgb = xgboost(x_train, nrounds, label=y_train, param=params_xgb, metrics=metrics, nthread=nthread, silent=1); -# @btime xgboost($x_train, $nrounds, label=$y_train, param=$params_xgb, metrics=$metrics, nthread=$nthread, silent=1); -# @info "xgboost predict:" -# @time pred_xgb = XGBoost.predict(m_xgb, x_train); -# @btime XGBoost.predict($m_xgb, $x_train); +@info "xgboost train:" +@time m_xgb = xgboost(x_train, nrounds, label=y_train, param=params_xgb, metrics=metrics, nthread=nthread, silent=1); +@btime xgboost($x_train, $nrounds, label=$y_train, param=$params_xgb, metrics=$metrics, nthread=$nthread, silent=1); +@info "xgboost predict:" +@time pred_xgb = XGBoost.predict(m_xgb, x_train); +@btime XGBoost.predict($m_xgb, $x_train); -# @info "evotrees train CPU:" -# params_evo.device = "cpu" -# @time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=100); -# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); -# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train); -# @info "evotrees predict CPU:" -# @time pred_evo = EvoTrees.predict(m_evo, x_train); -# @btime EvoTrees.predict($m_evo, $x_train); +@info "evotrees train CPU:" +params_evo.device = "cpu" +@time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=100); +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train); +@info "evotrees predict CPU:" +@time pred_evo = EvoTrees.predict(m_evo, x_train); +@btime EvoTrees.predict($m_evo, $x_train); CUDA.allowscalar(true) @info "evotrees train GPU:" params_evo.device = "gpu" @time m_evo_gpu = 
fit_evotree(params_evo; x_train, y_train); -@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); -@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); -@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); -@time m_evo_gpu = fit_evotree(params_evo; x_train, y_train); @time m_evo = fit_evotree(params_evo; x_train, y_train, x_eval=x_train, y_eval=y_train, metric=metric_evo, print_every_n=100); -# @btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); -# @info "evotrees predict GPU:" -# @time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); -# @btime EvoTrees.predict($m_evo_gpu, $x_train); \ No newline at end of file +@btime fit_evotree($params_evo; x_train=$x_train, y_train=$y_train, x_eval=$x_train, y_eval=$y_train, metric=metric_evo); +@info "evotrees predict GPU:" +@time pred_evo = EvoTrees.predict(m_evo_gpu, x_train); +@btime EvoTrees.predict($m_evo_gpu, $x_train); \ No newline at end of file
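
A note on the guarded gain computation introduced in src/find_split.jl and src/gpu/find_split_gpu.jl above: the child predictions predL / predR are now materialized only when a monotone constraint is active on the feature, since an unconstrained split never reads them. Below is a minimal single-split sketch of that logic; split_gain and its scalar arguments are hypothetical simplifications for illustration, not the package's actual histogram-based internals.

# Simplified sketch of the guarded monotone gain (hypothetical helper, not
# the package API). gL/hL/wL and gR/hR/wR are the summed gradients, hessians
# and weights of the left and right children for one candidate split.
function split_gain(gL, hL, wL, gR, hR, wR; lambda, min_weight, monotone_constraint)
    # both children must carry the minimum weight for the split to be valid
    (wL > min_weight && wR > min_weight) || return -Inf
    if monotone_constraint != 0
        # Newton-step leaf predictions, only needed when a constraint applies
        predL = -gL / (hL + lambda * wL)
        predR = -gR / (hR + lambda * wR)
        # reject candidates that violate the requested direction
        monotone_constraint == 1 && predL >= predR && return -Inf
        monotone_constraint == -1 && predL <= predR && return -Inf
    end
    # second-order gain, mirroring hist_gains_cpu! above
    (gL^2 / (hL + lambda * wL) + gR^2 / (hR + lambda * wR)) / 2
end

The same patches also correct the `hasproperty(params, :monotone_constraint)` lookup to the plural `:monotone_constraints`; with the singular spelling the check was always false, so the constraints vector stayed all zeros even when constraints were configured.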
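
The reworked test/monotonic.jl above exercises `monotone_constraints` through the same keyword-based `fit_evotree` API used in the benchmarks. The following is a minimal end-to-end sketch of that usage; the data generation is illustrative (a single noisy, locally non-monotone feature), not taken from the test file.

using EvoTrees
using Random
Random.seed!(123)

# illustrative data: one feature with a noisy, locally non-monotone trend
x_train = rand(1_000, 1) .* 2 .- 1
y_train = sin.(3 .* x_train[:, 1]) .+ x_train[:, 1] .+ 0.3 .* randn(1_000)

# increasing constraint on feature 1, as in the tests above
params = EvoTreeRegressor(
    loss=:linear,
    nrounds=100,
    nbins=32,
    eta=0.05,
    max_depth=6,
    monotone_constraints=Dict(1 => 1),
    rng=123,
)

model = fit_evotree(params; x_train, y_train)
preds = predict(model, x_train)

# with a single constrained feature, predictions sorted along that feature
# should come out non-decreasing
x_perm = sortperm(x_train[:, 1])
@assert all(diff(preds[x_perm]) .>= 0)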
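
Finally, the patches also touch the inspection utilities: experiments/random.jl now calls `importance(model)` with no feature range, and test/plot.jl builds `var_names` for tree plots. A short sketch of both, reusing the model from the previous note; the plotting calls are left commented, as in the test file, since they require Plots.

# gain-based feature importance (no feature range argument needed anymore)
gain = importance(model)

# readable names for tree plots, sized to the training matrix
var_names = ["var_$i" for i = 1:size(x_train, 2)]
# using Plots
# plot(model, 2, var_names)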