remove old observations framework
removed unused functionality based on the old Observations framework

fixed typos in the docs

fixed a typo in an example
odunbar committed Jul 31, 2024
1 parent f2e95cf · commit 69eb4a5
Showing 8 changed files with 21 additions and 59 deletions.
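Every file below follows the same migration pattern: the deprecated `Observations.Observation(y, Γ, names)` container (and the separate `truth_sample` / `truth.obs_noise_cov` arguments) is replaced by an `Observation` built from a `Dict`, which is passed directly to `EnsembleKalmanProcess`. A minimal before/after sketch in Julia; the data, prior, and RNG here are placeholder stand-ins, not values from the repository:

```julia
using Random, Statistics, LinearAlgebra
using EnsembleKalmanProcesses
using EnsembleKalmanProcesses.ParameterDistributions

# Placeholder stand-ins for quantities constructed in the examples below
rng = Random.MersenneTwister(42)
y_t = randn(rng, 3, 100)                  # 3 stacked observables, 100 noisy samples
Γy = 0.1 * Matrix(1.0I, 3, 3)             # observational noise covariance
data_names = ["M0_M1_M2"]                 # single name for the stacked data block

# Old construction (removed in this commit):
#   truth = Observations.Observation(y_t, Γy, data_names)
#   ekiobj = EnsembleKalmanProcess(initial_params, truth.mean, truth.obs_noise_cov, Inversion())

# New construction: an Observation built from a Dict, passed directly to the process
truth = Observation(Dict(
    "samples" => vec(mean(y_t, dims = 2)),  # stacked data vector (here, the sample mean)
    "covariances" => Γy,
    "names" => data_names,
))

prior = constrained_gaussian("u", 0.0, 1.0, -Inf, Inf)  # placeholder one-parameter prior
initial_params = construct_initial_ensemble(rng, prior, 20)
ekiobj = EnsembleKalmanProcess(initial_params, truth, Inversion(); scheduler = DataMisfitController())
```

The `scheduler` keyword mirrors the calls in the updated examples and is optional.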
13 changes: 8 additions & 5 deletions docs/src/examples/Cloudy_example.md
@@ -76,7 +76,6 @@ and finally the EKP packages.

```julia
using EnsembleKalmanProcesses
using EnsembleKalmanProcesses.Observations
using EnsembleKalmanProcesses.ParameterDistributions
using EnsembleKalmanProcesses.DataContainers
using EnsembleKalmanProcesses.PlotRecipes
@@ -163,8 +162,13 @@ for i in 1:n_samples
y_t[:, i] = G_t .+ rand(MvNormal(μ, Γy))
end

truth = Observations.Observation(y_t, Γy, data_names)
truth_sample = truth.mean
truth = Observation(
Dict(
"samples" => vec(mean(y_t, dims = 2)),
"covariances" => Γy,
"names" => data_names,
)
)
```
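As a usage aside (not part of the diff): the single `truth` object now bundles the stacked data vector, its noise covariance, and the block names. A small sketch of querying it, assuming the current EnsembleKalmanProcesses accessors `get_obs` and `get_obs_noise_cov` (the calibration scripts in this commit use `get_sample` for the same retrieval):

```julia
# Assumes `truth`, `y_t`, and `Γy` from the block above.
# `get_obs` / `get_obs_noise_cov` are assumed accessors of the new Observation type.
y_obs = get_obs(truth)               # stacked data vector (the sample mean of y_t)
Γ_obs = get_obs_noise_cov(truth)     # noise covariance assembled from Γy
@assert y_obs ≈ vec(mean(y_t, dims = 2))
@assert size(Γ_obs) == size(Γy)
```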

#### Perform ensemble Kalman inversion
@@ -181,8 +185,7 @@ N_iter = 8 # number of EKI iterations
initial_params = construct_initial_ensemble(rng, priors, N_ens)
ekiobj = EnsembleKalmanProcess(
initial_params,
truth_sample,
truth.obs_noise_cov,
truth,
Inversion(),
scheduler=DataMisfitController()
)
1 change: 0 additions & 1 deletion docs/src/examples/lorenz_example.md
@@ -68,7 +68,6 @@ using CalibrateEmulateSample.Utilities
using CalibrateEmulateSample.EnsembleKalmanProcesses
using CalibrateEmulateSample.ParameterDistributions
using CalibrateEmulateSample.DataContainers
using CalibrateEmulateSample.Observations
```

The first input settings define which input-output pairs to use for training the emulator. The Calibrate stage (run using `calibrate.jl`) generates parameter-to-data pairs by running the L96 system using an iterative optimization approach (`EnsembleKalmanProcess.jl`). So we first define which iterations we would like to use data from for our emulator training
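For illustration only (variable names here are hypothetical, not taken from the example file), selecting the training iterations might look like:

```julia
# Choose which calibration (EKI) iterations supply input-output pairs for emulator training.
min_iter = 1                          # first iteration to include
max_iter = 5                          # last iteration to include
train_iterations = min_iter:max_iter

# Given an `ekiobj::EnsembleKalmanProcess` from the Calibrate stage, the pairs could then
# be gathered with this repository's Utilities helper (exact signature assumed):
# input_output_pairs = Utilities.get_training_points(ekiobj, train_iterations)
```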
15 changes: 6 additions & 9 deletions examples/Cloudy/Cloudy_calibrate.jl
@@ -20,7 +20,6 @@ include(joinpath(@__DIR__, "DynamicalModel.jl"))

# Import Ensemble Kalman Processes modules
using EnsembleKalmanProcesses
using EnsembleKalmanProcesses.Observations
using EnsembleKalmanProcesses.ParameterDistributions
using EnsembleKalmanProcesses.DataContainers
using EnsembleKalmanProcesses.PlotRecipes
@@ -100,7 +99,7 @@ savefig(p, output_directory * "cloudy_priors.png")
### Define the data from which we want to learn the parameters
###

data_names = ["M0", "M1", "M2"]
data_names = ["M0_M1_M2"]
moments = [0.0, 1.0, 2.0]
n_moments = length(moments)

@@ -139,8 +138,7 @@ for i in 1:n_samples
y_t[:, i] = G_t .+ rand(MvNormal(μ, Γy))
end

truth = Observations.Observation(y_t, Γy, data_names)
truth_sample = truth.mean
truth = Observation(Dict("samples" => vec(mean(y_t, dims = 2)), "covariances" => Γy, "names" => data_names))


###
@@ -153,8 +151,7 @@ N_iter = 15 # number of EKI iterations
initial_params = construct_initial_ensemble(rng, priors, N_ens)
ekiobj = EnsembleKalmanProcess(
initial_params,
truth_sample,
truth.obs_noise_cov,
truth,
Inversion(),
scheduler = DataMisfitController(),
)
@@ -196,9 +193,9 @@ save(
"eki",
ekiobj,
"truth_sample",
truth_sample,
"truth_sample_mean",
truth.mean,
get_sample(truth),
"truth_sample_mean",
vec(mean(y_t, dims = 2)),
"truth_input_constrained",
ϕ_true,
)
12 changes: 5 additions & 7 deletions examples/Lorenz/calibrate.jl
@@ -119,7 +119,7 @@ function main()
###
### Define the data from which we want to learn the parameters
###
data_names = ["y0", "y1"]
data_names = ["y0_y1"]


###
@@ -254,8 +254,7 @@ function main()


# Construct observation object
truth = Observations.Observation(yt, Γy, data_names)
truth_sample = yt[:, end]
truth = Observation(Dict("samples" => vec(mean(yt, dims = 2)), "covariances" => Γy, "names" => data_names))
###
### Calibrate: Ensemble Kalman Inversion
###
@@ -271,8 +270,7 @@

ekiobj = EKP.EnsembleKalmanProcess(
initial_params,
truth_sample,
truth.obs_noise_cov,
truth,
EKP.Inversion(),
scheduler = EKP.DataMisfitController(),
verbose = true,
@@ -316,9 +314,9 @@ function main()
"eki",
ekiobj,
"truth_sample",
truth_sample,
get_sample(truth),
"truth_sample_mean",
truth.mean,
vec(mean(yt, dims = 2)),
"truth_input_constrained",
params_true, #constrained here, as these are in a physically constrained space (unlike the u inputs),
)
1 change: 0 additions & 1 deletion examples/Lorenz/emulate_sample.jl
@@ -17,7 +17,6 @@ using CalibrateEmulateSample.Utilities
using CalibrateEmulateSample.EnsembleKalmanProcesses
using CalibrateEmulateSample.ParameterDistributions
using CalibrateEmulateSample.DataContainers
using CalibrateEmulateSample.Observations

function get_standardizing_factors(data::Array{FT, 2}) where {FT}
# Input: data size: N_data x N_ensembles
4 changes: 2 additions & 2 deletions src/CalibrateEmulateSample.jl
@@ -10,9 +10,9 @@ module CalibrateEmulateSample
using Distributions, Statistics, LinearAlgebra, DocStringExtensions

# imported modules from EKP.
import EnsembleKalmanProcesses: EnsembleKalmanProcesses, ParameterDistributions, Observations, DataContainers
import EnsembleKalmanProcesses: EnsembleKalmanProcesses, ParameterDistributions, DataContainers

export EnsembleKalmanProcesses, ParameterDistributions, Observations, DataContainers
export EnsembleKalmanProcesses, ParameterDistributions, DataContainers


# Internal deps, light external deps
29 changes: 0 additions & 29 deletions src/Utilities.jl
@@ -5,13 +5,11 @@ using LinearAlgebra
using Statistics
using StatsBase
using Random
using ..Observations
using ..EnsembleKalmanProcesses
EnsembleKalmanProcess = EnsembleKalmanProcesses.EnsembleKalmanProcess
using ..DataContainers

export get_training_points
export get_obs_sample
export orig2zscore
export zscore2orig
"""
@@ -50,33 +48,6 @@ function get_training_points(
return training_points
end


"""
$(DocStringExtensions.TYPEDSIGNATURES)
Return a random sample from the observations, for use in the MCMC.
- `rng` - optional RNG object used to pick random sample; defaults to `Random.GLOBAL_RNG`.
- `obs` - Observation struct with the observations (extract will pick one
of the sample observations to train).
- `rng_seed` - optional kwarg; if provided, used to re-seed `rng` before sampling.
"""
function get_obs_sample(
rng::Random.AbstractRNG,
obs::Observation;
rng_seed::Union{IT, Nothing} = nothing,
) where {IT <: Int}
# Ensuring reproducibility of the sampled parameter values:
# re-seed the rng *only* if we're given a seed
if rng_seed !== nothing
rng = Random.seed!(rng, rng_seed)
end
row_idxs = StatsBase.sample(rng, axes(obs.samples, 1), 1; replace = false, ordered = false)
return obs.samples[row_idxs...]
end
# first arg optional; defaults to GLOBAL_RNG (as in Random, StatsBase)
get_obs_sample(obs::Observation; kwargs...) = get_obs_sample(Random.GLOBAL_RNG, obs; kwargs...)

function orig2zscore(X::AbstractVector{FT}, mean::AbstractVector{FT}, std::AbstractVector{FT}) where {FT}
# Compute the z scores of a vector X using the given mean
# and std
5 changes: 0 additions & 5 deletions test/Utilities/runtests.jl
@@ -4,7 +4,6 @@ using Statistics
using LinearAlgebra

using CalibrateEmulateSample.Utilities
using CalibrateEmulateSample.Observations
using CalibrateEmulateSample.EnsembleKalmanProcesses
using CalibrateEmulateSample.DataContainers

@@ -15,10 +14,6 @@ using CalibrateEmulateSample.DataContainers

arr = vcat([i * ones(3)' for i in 1:5]...)
arr_t = permutedims(arr, (2, 1))
data_names = ["d1", "d2", "d3"]
obs = Observation(arr_t, data_names) #data must be columns as default
sample = get_obs_sample(rng, obs)
@test sample == [5.0, 5.0, 5.0]

mean_arr = dropdims(mean(arr, dims = 1), dims = 1)
std_arr = dropdims(std(arr, dims = 1), dims = 1)
