Merge pull request #244 from Team-RADDISH/mmg/grf-update-and-cov-fix
Update `GaussianRandomFields` version and add additional tests for linear Gaussian models
DanGiles authored May 31, 2023
2 parents 87c0324 + aabb7c5 commit a0e7ea7
Showing 5 changed files with 179 additions and 92 deletions.
89 changes: 5 additions & 84 deletions extra/linear_gaussian_validation.jl
@@ -1,5 +1,5 @@
### A Pluto.jl notebook ###
# v0.19.18
# v0.19.22

using Markdown
using InteractiveUtils
@@ -195,35 +195,6 @@ function plot_filter_estimate_rmse_vs_n_particles(
)
end

# ╔═╡ 159ed63c-5dac-4f9b-a0cc-a5c13b6978e0
function diagonal_linear_gaussian_model_parameters(
state_dimension=3,
state_transition_coefficient=0.8,
observation_coefficient=1.0,
initial_state_std=1.0,
state_noise_std=0.6,
observation_noise_std=0.5,
)
return Dict(
:state_transition_matrix => ScalMat(
state_dimension, state_transition_coefficient
),
:observation_matrix => ScalMat(
state_dimension, observation_coefficient
),
:initial_state_mean => Zeros(state_dimension),
:initial_state_covar => ScalMat(
state_dimension, initial_state_std^2
),
:state_noise_covar => ScalMat(
state_dimension, state_noise_std^2
),
:observation_noise_covar => ScalMat(
state_dimension, observation_noise_std^2
),
)
end

# ╔═╡ 89dae12b-0010-4ea1-ae69-490137196662
let
n_time_step = 200
@@ -235,7 +206,7 @@ let
n_particle,
filter_type,
LinearGaussian.init,
diagonal_linear_gaussian_model_parameters(),
LinearGaussian.diagonal_linear_gaussian_model_parameters(),
seed
)
end
@@ -249,59 +220,12 @@ let
n_time_step,
n_particles,
LinearGaussian.init,
diagonal_linear_gaussian_model_parameters(),
LinearGaussian.diagonal_linear_gaussian_model_parameters(),
seed
)
# savefig(figure, "diagonal_linear_gaussian_model_estimate_rmse_vs_n_particles.pdf")
figure
end

# ╔═╡ db091a48-589f-4393-8951-aadc351588ff
function stochastically_driven_dsho_model_parameters(
δ=0.2,
ω=1.,
Q=2.,
σ=0.5,
)
β = sqrt(Q^2 - 1 / 4)
return Dict(
:state_transition_matrix => exp(-ω * δ / 2Q) * [
[
cos(ω * β * δ / Q) + sin(ω * β * δ / Q) / 2β,
Q * sin(ω * β * δ / Q) /* β)
]';
[
-Q * ω * sin(ω * δ * β / Q) / β,
cos(ω * δ * β / Q) - sin(ω * δ * β / Q) / 2β
]'
],
:observation_matrix => ScalMat(2, 1.),
:initial_state_mean => Zeros(2),
:initial_state_covar => ScalMat(2, 1.),
:state_noise_covar => PDMat(
Q * exp(-ω * δ / Q) * [
[
(
(cos(2ω * δ * β / Q) - 1)
- 2β * sin(2ω * δ * β / Q)
+ 4β^2 * (exp(ω * δ / Q) - 1)
) / (8ω^3 * β^2),
Q * sin(ω * δ * β / Q)^2 / (2ω^2 * β^2)
]';
[
Q * sin(ω * δ * β / Q)^2 / (2ω^2 * β^2),
(
(cos(2ω * δ * β / Q) - 1)
+ 2β * sin(2ω * δ * β / Q)
+ 4β^2 * (exp(ω * δ / Q) - 1)
) / (8ω * β^2),
]'
]
),
:observation_noise_covar => ScalMat(2, σ^2)
)
end

# ╔═╡ 64a289be-75ce-42e2-9e43-8e0286f70a35
let
n_time_step = 200
@@ -313,7 +237,7 @@ let
n_particle,
filter_type,
LinearGaussian.init,
stochastically_driven_dsho_model_parameters(),
LinearGaussian.stochastically_driven_dsho_model_parameters(),
seed
)
end
@@ -328,10 +252,9 @@ let
n_time_step,
n_particles,
LinearGaussian.init,
stochastically_driven_dsho_model_parameters(),
LinearGaussian.stochastically_driven_dsho_model_parameters(),
seed
)
# savefig(figure, "dsho_linear_gaussian_model_estimate_rmse_vs_n_particles.pdf")
figure
end

@@ -341,9 +264,7 @@ end
# ╠═4d2656ca-eacb-4d2b-91cb-bc82fdb49520
# ╠═a64762bb-3a9f-4b1c-83db-f1a366f282eb
# ╠═2ad564f3-48a2-4c2a-8d7d-384a84f7d6d2
# ╠═159ed63c-5dac-4f9b-a0cc-a5c13b6978e0
# ╠═89dae12b-0010-4ea1-ae69-490137196662
# ╠═3e0abdfc-8668-431c-8ad3-61802e21d34e
# ╠═db091a48-589f-4393-8951-aadc351588ff
# ╠═64a289be-75ce-42e2-9e43-8e0286f70a35
# ╠═b396f776-885b-437a-94c3-693f318d7ed2
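
The two parameter constructor functions removed from this notebook now live in the shared `LinearGaussian` test model module (see test/models/lineargaussian.jl below), and the notebook cells call them through that module. A minimal sketch of the intended call pattern (the include path here is an assumption for illustration, not something shown in this diff):

# Hypothetical set-up: load the shared test model module relative to extra/.
include(joinpath(@__DIR__, "..", "test", "models", "lineargaussian.jl"))

# Build the same diagonal linear Gaussian model the notebook cells above use.
parameters = LinearGaussian.diagonal_linear_gaussian_model_parameters()
model = LinearGaussian.init(parameters)
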
2 changes: 1 addition & 1 deletion test/Project.toml
@@ -18,7 +18,7 @@ YAML = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6"
[compat]
Distributions = "0.22, 0.23, 0.24, 0.25"
FillArrays = "0.13"
GaussianRandomFields = "2.1.1"
GaussianRandomFields = "2.2.1"
HDF5 = "0.14, 0.15, 0.16"
MPI = "0.20.8"
OrdinaryDiffEq = "6.40"
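
For reference, under Pkg's compat semantics the entry `GaussianRandomFields = "2.2.1"` admits any release in `[2.2.1, 3.0.0)`. A quick way to check which version a test environment actually resolves, using only standard Pkg calls (assumes it is run from the repository root; nothing here is specific to this package):

using Pkg
Pkg.activate("test")                 # activate the test environment
Pkg.status("GaussianRandomFields")   # should report a release at or above 2.2.1 and below 3.0.0
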
73 changes: 73 additions & 0 deletions test/models/lineargaussian.jl
@@ -34,6 +34,79 @@ struct LinearGaussianModel{S <: Real, T <: Real}
observation_noise_distribution::MvNormal{T}
end

function diagonal_linear_gaussian_model_parameters(
state_dimension=3,
state_transition_coefficient=0.8,
observation_coefficient=1.0,
initial_state_std=1.0,
state_noise_std=0.6,
observation_noise_std=0.5,
)
return Dict(
:state_transition_matrix => ScalMat(
state_dimension, state_transition_coefficient
),
:observation_matrix => ScalMat(
state_dimension, observation_coefficient
),
:initial_state_mean => Zeros(state_dimension),
:initial_state_covar => ScalMat(
state_dimension, initial_state_std^2
),
:state_noise_covar => ScalMat(
state_dimension, state_noise_std^2
),
:observation_noise_covar => ScalMat(
state_dimension, observation_noise_std^2
),
)
end

function stochastically_driven_dsho_model_parameters(
δ=0.2,
ω=1.,
Q=2.,
σ=0.5,
)
β = sqrt(Q^2 - 1 / 4)
return Dict(
:state_transition_matrix => exp(-ω * δ / 2Q) * [
[
cos(ω * β * δ / Q) + sin(ω * β * δ / Q) / 2β,
Q * sin(ω * β * δ / Q) /* β)
]';
[
-Q * ω * sin(ω * δ * β / Q) / β,
cos(ω * δ * β / Q) - sin(ω * δ * β / Q) / 2β
]'
],
:observation_matrix => ScalMat(2, 1.),
:initial_state_mean => Zeros(2),
:initial_state_covar => ScalMat(2, 1.),
:state_noise_covar => PDMat(
Q * exp(-ω * δ / Q) * [
[
(
(cos(2ω * δ * β / Q) - 1)
- 2β * sin(2ω * δ * β / Q)
+ 4β^2 * (exp(ω * δ / Q) - 1)
) / (8ω^3 * β^2),
Q * sin(ω * δ * β / Q)^2 / (2ω^2 * β^2)
]';
[
Q * sin(ω * δ * β / Q)^2 / (2ω^2 * β^2),
(
(cos(2ω * δ * β / Q) - 1)
+ 2β * sin(2ω * δ * β / Q)
+ 4β^2 * (exp(ω * δ / Q) - 1)
) / (8ω * β^2),
]'
]
),
:observation_noise_covar => ScalMat(2, σ^2)
)
end

function init(parameters_dict::Dict)
parameters = LinearGaussianModelParameters(; parameters_dict...)
(observation_dimension, state_dimension) = size(
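
The closed-form `:state_transition_matrix` added to test/models/lineargaussian.jl above is the exact discretisation, over a time step δ, of a damped harmonic oscillator with natural frequency ω and quality factor Q. A small self-contained check of that identity, not part of the commit and using only the standard library:

using LinearAlgebra

δ, ω, Q = 0.2, 1.0, 2.0
β = sqrt(Q^2 - 1 / 4)
# Continuous-time drift matrix of the damped oscillator in (position, velocity) form
A = [
    [0.0, 1.0]';
    [-ω^2, -ω / Q]'
]
# Closed form used in stochastically_driven_dsho_model_parameters
closed_form = exp(-ω * δ / 2Q) * [
    [cos(ω * β * δ / Q) + sin(ω * β * δ / Q) / 2β, Q * sin(ω * β * δ / Q) /* β)]';
    [-Q * ω * sin(ω * δ * β / Q) / β, cos(ω * δ * β / Q) - sin(ω * δ * β / Q) / 2β]'
]
# exp of a square matrix is the matrix exponential; the two agree to rounding error
@assert closed_form ≈ exp* A)
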
2 changes: 1 addition & 1 deletion test/models/llw2d.jl
@@ -396,7 +396,7 @@ function get_covariance_gaussian_random_fields(
model_parameters, (x_index_2, y_index_2)
)
covariance_structure = gaussian_random_fields[var_index_1].grf.cov.cov
return covariance_structure.σ^2 * apply(
return apply(
covariance_structure, abs.(grid_point_1 .- grid_point_2)
)
else
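
The change above removes an explicit `σ^2` factor from the reference covariance used by the LLW2d tests. The assumption behind the fix (consistent with the `GaussianRandomFields` compat bump in this pull request) is that `apply` already returns the covariance scaled by the marginal variance, so multiplying by `covariance_structure.σ^2` again double-counted it. A purely illustrative sketch of that double-counting, using a hypothetical stand-in rather than the real library type:

# Stand-in covariance, not the GaussianRandomFields API.
σ = 1.5
ρ(Δ) = exp(-sum(abs, Δ))           # toy correlation function, equal to 1 at zero separation
apply_stand_in(Δ) = σ^2 * ρ(Δ)     # assumed behaviour: σ^2 applied inside `apply`
Δ = abs.([0.3, 0.1] .- [0.2, 0.4])
old_value = σ^2 * apply_stand_in(Δ)   # pattern of the removed line: σ^2 counted twice
new_value = apply_stand_in(Δ)         # pattern of the fixed line
@assert old_value == σ^2 * new_value
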
105 changes: 99 additions & 6 deletions test/runtests.jl
@@ -3,6 +3,8 @@ using HDF5, LinearAlgebra, MPI, PDMats, Random, StableRNGs, Statistics, Test, YAML

include(joinpath(@__DIR__, "models", "llw2d.jl"))
include(joinpath(@__DIR__, "models", "lorenz63.jl"))
include(joinpath(@__DIR__, "models", "lineargaussian.jl"))
include(joinpath(@__DIR__, "kalman.jl"))

using .LLW2d
using .Lorenz63
@@ -226,10 +228,13 @@ function run_unit_tests_for_generic_model_interface(model, seed)
end

@testset (
"Generic model interface unit tests - $model_module"
) for model_module in (LLW2d, Lorenz63)
"Generic model interface unit tests - $(parentmodule(typeof(model)))"
) for model in (
LLW2d.init(Dict()),
Lorenz63.init(Dict()),
LinearGaussian.init(LinearGaussian.stochastically_driven_dsho_model_parameters())
)
seed = 1234
model = model_module.init(Dict())
run_unit_tests_for_generic_model_interface(model, seed)
end

@@ -508,10 +513,14 @@ function run_tests_for_optimal_proposal_model_interface(
end

@testset (
"Optimal proposal model interface unit tests - $(model_module)"
) for model_module in (LLW2d, Lorenz63)
"Optimal proposal model interface unit tests - $(parentmodule(typeof(model)))"
) for model in (
# Use sigma != 1. to test if covariance is being scaled by sigma correctly
LLW2d.init(Dict("llw2d" => Dict("sigma" => [0.5, 1.5, 1.5]))),
Lorenz63.init(Dict()),
LinearGaussian.init(LinearGaussian.stochastically_driven_dsho_model_parameters())
)
seed = 1234
model = model_module.init(Dict())
# Number of samples to use in convergence tests of Monte Carlo estimates
estimate_n_samples = [10, 100, 1000]
# Constant factor used in Monte Carlo estimate convergence tests. Set based on some
@@ -523,6 +532,90 @@ end
)
end

function run_tests_for_convergence_of_filter_estimates_against_kalman_filter(
filter_type,
init_model,
model_parameters_dict,
seed,
n_time_step,
n_particles,
mean_rmse_bound_constant,
log_var_rmse_bound_constant,
)
rng = Random.TaskLocalRNG()
Random.seed!(rng, seed)
model = init_model(model_parameters_dict)
observation_seq = ParticleDA.simulate_observations_from_model(
model, n_time_step; rng=rng
)
true_state_mean_seq, true_state_var_seq = Kalman.run_kalman_filter(
model, observation_seq
)
for n_particle in n_particles
output_filename = tempname()
filter_parameters = ParticleDA.FilterParameters(
nprt=n_particle, verbose=true, output_filename=output_filename
)
states, statistics = ParticleDA.run_particle_filter(
init_model,
filter_parameters,
model_parameters_dict,
observation_seq,
filter_type,
ParticleDA.MeanAndVarSummaryStat;
rng=rng
)
state_mean_seq = Matrix{ParticleDA.get_state_eltype(model)}(
undef, ParticleDA.get_state_dimension(model), n_time_step
)
state_var_seq = Matrix{ParticleDA.get_state_eltype(model)}(
undef, ParticleDA.get_state_dimension(model), n_time_step
)
weights_seq = Matrix{Float64}(undef, n_particle, n_time_step)
h5open(output_filename, "r") do file
for t in 1:n_time_step
key = ParticleDA.time_index_to_hdf5_key(t)
state_mean_seq[:, t] = read(file["state_avg"][key])
state_var_seq[:, t] = read(file["state_var"][key])
weights_seq[:, t] = read(file["weights"][key])
end
end
mean_rmse = sqrt(
mean(x -> x.^2, state_mean_seq .- true_state_mean_seq)
)
log_var_rmse = sqrt(
mean(x -> x.^2, log.(state_var_seq) .- log.(true_state_var_seq))
)
# Monte Carlo estimates of mean and log variance should converge to the true values
# at an O(1 / sqrt(n_particle)) rate
@test mean_rmse < mean_rmse_bound_constant / sqrt(n_particle)
@test log_var_rmse < log_var_rmse_bound_constant / sqrt(n_particle)
end
end

@testset (
"Filter estimate validation against Kalman filter - $(filter_type)"
) for filter_type in (BootstrapFilter, OptimalFilter)
seed = 1234
n_time_step = 100
n_particles = [30, 100, 300, 1000]
# Constant factors used in Monte Carlo estimate convergence tests. Set based on some
# trial and error to keep tests relatively sensitive while avoiding too high
# probability of false failures
mean_rmse_bound_constant = 1.
log_var_rmse_bound_constant = 5.
run_tests_for_convergence_of_filter_estimates_against_kalman_filter(
filter_type,
LinearGaussian.init,
LinearGaussian.stochastically_driven_dsho_model_parameters(),
seed,
n_time_step,
n_particles,
mean_rmse_bound_constant,
log_var_rmse_bound_constant,
)
end

@testset "Summary statistics unit tests" begin
MPI.Init()
seed = 5678
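
The convergence testset added to test/runtests.jl above bounds the filter estimates' RMSE against the Kalman filter reference by a constant over sqrt(n_particle). A standalone toy illustration of that Monte Carlo rate (the constant, replication count and sample sizes here are arbitrary choices for the sketch, not the values used in the test suite):

using Random, Statistics

rng = Random.MersenneTwister(1234)
true_mean = 0.0
bound_constant = 2.0   # loose constant picked for this toy example
for n_particle in (30, 100, 300, 1000)
    # RMSE of a sample mean over independent replications shrinks like 1 / sqrt(n_particle)
    estimates = [mean(randn(rng, n_particle)) for _ in 1:50]
    rmse = sqrt(mean(abs2, estimates .- true_mean))
    @assert rmse < bound_constant / sqrt(n_particle)
end
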
