This pipeline can be used to reproduce our analysis, up to fitting models
to our simulated scenarios and case studies and post-processing the
results. The workflow uses the targets
package and
you may find it helpful to review an overview of how this package works
(e.g. in the package documentation) before making use of our code. For a
simplified version of our analysis that does not make use of targets
see the repository README.
The analysis pipeline for this work can be regenerated by rendering this file,
rmarkdown::render("_targets.Rmd")
The pipeline can then be run using,
tar_make()
The complete pipeline can be visualised using,
tar_visnetwork()
Alternatively, the pipeline can be explored interactively using this
notebook or updated programmatically using the scripts in bin. We also
provide an archived version of our targets
workflow for those only wanting to
reproduce sections of our analysis. This can be downloaded using the
following,
source(here::here("R", "targets-archive.R"))
get_targets_archive()
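Once the pipeline has been run (or the archive downloaded), individual targets can be inspected interactively; for example (assuming the relevant targets have already been built),
library(targets)
# Return the value of a single target
tar_read(distributions)
# Load a target into the global environment under its own name
tar_load(scenarios)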
Set up the workflow pipeline and options. We first load the targets
package and remove the potentially outdated workflow.
library(targets)
library(stantargets)
library(tarchetypes)
library(data.table)
library(ggplot2)
library(purrr, quietly = TRUE)
#>
#> Attaching package: 'purrr'
#> The following object is masked from 'package:data.table':
#>
#> transpose
library(here)
#> here() starts at /home/seabbs/Dropbox/academic/projects/dynamicaltruncation
library(lubridate)
#>
#> Attaching package: 'lubridate'
#> The following objects are masked from 'package:data.table':
#>
#> hour, isoweek, mday, minute, month, quarter, second, wday, week,
#> yday, year
#> The following objects are masked from 'package:base':
#>
#> date, intersect, setdiff, union
library(arrow)
#>
#> Attaching package: 'arrow'
#> The following object is masked from 'package:lubridate':
#>
#> duration
#> The following object is masked from 'package:utils':
#>
#> timestamp
library(future)
library(future.callr)
tar_unscript()
We now define shared global options across our workflow and load R
functions from the R
folder.
library(targets)
library(tarchetypes)
library(stantargets)
library(cmdstanr)
library(data.table)
library(ggplot2)
library(purrr, quietly = TRUE)
library(arrow)
library(here)
library(future)
library(future.callr)
plan(callr)
functions <- list.files(here("R"), full.names = TRUE)
walk(functions, source)
rm("functions")
set_cmdstan_path()
# Set the number of chains to run in parallel (more than 4 will have no impact
# on runtimes)
parallel_chains <- 4
tar_option_set(
packages = c("data.table", "ggplot2", "purrr", "cmdstanr", "brms", "here",
"arrow"),
deployment = "main",
memory = "transient",
workspace_on_error = TRUE,
error = "continue",
garbage_collection = TRUE
)
#> Establish _targets.R and _targets_r/globals/globals.R.
- We consider three distribution scenarios: short, medium, and long.
tar_group_by(
distributions,
data.table(
scenario = c("short", "medium", "long"),
meanlog = c(1.2, 1.6, 1.8),
sdlog = c(0.4, 0.6, 0.8)
) |>
add_natural_scale_mean_sd(),
scenario
)
#> Establish _targets.R and _targets_r/targets/distributions.R.
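As a rough check on these scenarios, the natural-scale mean and standard deviation implied by each meanlog and sdlog pair follow from the standard log-normal moment formulas (we assume this is what add_natural_scale_mean_sd() computes),
library(data.table)
dist_check <- data.table(
  scenario = c("short", "medium", "long"),
  meanlog = c(1.2, 1.6, 1.8),
  sdlog = c(0.4, 0.6, 0.8)
)
# E[X] = exp(mu + sigma^2 / 2); SD[X] = E[X] * sqrt(exp(sigma^2) - 1)
dist_check[, mean := exp(meanlog + sdlog^2 / 2)]
dist_check[, sd := mean * sqrt(exp(sdlog^2) - 1)]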
- Save distribution scenarios
tar_file(
save_distributions,
save_csv(distributions, "distributions.csv", path = "data/meta")
)
#> Establish _targets.R and _targets_r/targets/save_distributions.R.
- Simulate the outbreak.
tar_target(simulated_cases_outbreak, {
simulate_gillespie(r = 0.2, gamma = 1 / 7, init_I = 50, n = 10000, seed = 101)
})
#> Define target simulated_cases_outbreak from chunk code.
#> Establish _targets.R and _targets_r/targets/simulated_cases_outbreak.R.
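As a back-of-the-envelope check on these parameters (not part of the pipeline): under the standard SIR early-growth relationship r = gamma * (R0 - 1), a growth rate of 0.2 per day with a mean infectious period of 7 days implies a reproduction number of roughly 2.4,
r <- 0.2        # daily exponential growth rate
gamma <- 1 / 7  # recovery rate (mean infectious period of 7 days)
R0 <- 1 + r / gamma
R0
#> [1] 2.4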
- Simulate observations of primary and secondary events as linelist for each distribution scenario.
tar_target(
simulated_secondary_outbreak,
simulated_cases_outbreak |>
simulate_secondary(
meanlog = distributions[, "meanlog"][[1]],
sdlog = distributions[, "sdlog"][[1]]
) |>
DT(, distribution := distributions[, "scenario"][[1]]),
pattern = map(distributions)
)
#> Establish _targets.R and _targets_r/targets/simulated_secondary_outbreak.R.
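Schematically, simulate_secondary() adds a log-normal delay to each primary event time; a minimal sketch of the idea (the packaged function also handles additional bookkeeping we omit here),
# Sketch: draw a log-normal delay for each primary event
sketch_secondary <- function(cases, meanlog, sdlog) {
  cases <- data.table::copy(cases)
  cases[, delay := rlnorm(.N, meanlog = meanlog, sdlog = sdlog)]
  cases[, stime := ptime + delay]
  cases[]
}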
- Simulate the observation process
tar_target(simulated_observations_outbreak, {
simulated_secondary_outbreak |>
observe_process()
})
#> Define target simulated_observations_outbreak from chunk code.
#> Establish _targets.R and _targets_r/targets/simulated_observations_outbreak.R.
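observe_process() applies daily censoring to both events, so each continuous event time is only observed to fall within a day. In essence (a simplified sketch; the real function also derives further observation variables),
# Sketch: daily (interval) censoring of continuous event times
sketch_observe <- function(obs) {
  obs <- data.table::copy(obs)
  obs[, ptime_lwr := floor(ptime)][, ptime_upr := ptime_lwr + 1]
  obs[, stime_lwr := floor(stime)][, stime_upr := stime_lwr + 1]
  obs[, delay_daily := stime_lwr - ptime_lwr]
  obs[]
}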
- Save the outbreak simulation
tar_file(
save_outbreak_data,
save_csv(
simulated_observations_outbreak, "outbreak-simulation.csv", path = "data/scenarios"
)
)
#> Establish _targets.R and _targets_r/targets/save_outbreak_data.R.
- For outbreak simulations, we estimate across a range of sample sizes
(N = 10, 100, 200, 400). N = 400 is the default case.
tar_target(sample_sizes, {
c(10, 100, 200, 400)
})
#> Define target sample_sizes from chunk code.
#> Establish _targets.R and _targets_r/targets/sample_sizes.R.
- For the outbreak simulation, we estimate all models at chosen points across the outbreak: “early outbreak” (15 days), “near peak” (30 days), “past peak” (45 days), and “late outbreak” (60 days).
tar_group_by(
outbreak_estimation_times,
data.table(
scenario = c("early outbreak", "near peak", "past peak", "late outbreak"),
time = c(15, 30, 45, 60)
),
scenario
)
#> Establish _targets.R and _targets_r/targets/outbreak_estimation_times.R.
- Save outbreak observation times
tar_file(
save_outbreak_estimation_times,
save_csv(
outbreak_estimation_times, "outbreak_estimation_times.csv",
path = "data/meta"
)
)
#> Establish _targets.R and _targets_r/targets/save_outbreak_estimation_times.R.
- Truncate the available simulated observations based on the estimation time for each scenario.
tar_target(
truncated_sim_obs_outbreak,
simulated_observations_outbreak |>
filter_obs_by_obs_time(
obs_time = outbreak_estimation_times[, "time"][[1]]
) |>
DT(, scenario := outbreak_estimation_times[, "scenario"][[1]]),
pattern = map(outbreak_estimation_times)
)
#> Establish _targets.R and _targets_r/targets/truncated_sim_obs_outbreak.R.
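filter_obs_by_obs_time() emulates real-time observation: only event pairs whose secondary event has occurred by the estimation time are retained, which is what induces right truncation in the observed delays. The core of the filter is something like (a hypothetical sketch; the packaged helper also records observation times),
# Sketch: keep only pairs fully observed by obs_time (right truncation)
sketch_filter_by_obs_time <- function(obs, obs_time) {
  obs[stime_upr <= obs_time][, obs_at := obs_time][]
}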
tar_group_by(
group_truncated_sim_obs_outbreak,
truncated_sim_obs_outbreak,
scenario, distribution
)
#> Establish _targets.R and _targets_r/targets/group_truncated_sim_obs_outbreak.R.
- Retrospective incidence
tar_target(
retro_outbreak_incidence,
simulated_observations_outbreak |>
filter_obs_by_ptime(
obs_time = outbreak_estimation_times[, "time"][[1]],
obs_at = "max_secondary"
) |>
DT(, scenario := outbreak_estimation_times[, "scenario"][[1]]) |>
event_to_incidence(c("scenario", "distribution")),
pattern = map(outbreak_estimation_times)
)
#> Establish _targets.R and _targets_r/targets/retro_outbreak_incidence.R.
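event_to_incidence() reduces a linelist to a daily incidence time series of primary events, roughly as follows (a sketch assuming counting is by censored primary event day),
# Sketch: daily incidence is the count of primary events per day
sketch_event_to_incidence <- function(obs, by = NULL) {
  incidence <- obs[, .(cases = .N), by = c(by, "ptime_daily")]
  data.table::setnames(incidence, "ptime_daily", "time")
  data.table::setorder(incidence, time)
  incidence[]
}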
- Sample observations
tar_target(
sampled_simulated_observations_outbreak,
group_truncated_sim_obs_outbreak |>
as.data.table() |>
DT(sample(1:.N, min(.N, sample_sizes), replace = FALSE)) |>
DT(, sample_size := as.factor(sample_sizes)) |>
DT(, data_type := "outbreak"),
pattern = cross(sample_sizes, group_truncated_sim_obs_outbreak)
)
#> Establish _targets.R and _targets_r/targets/sampled_simulated_observations_outbreak.R.
tar_target(list_simulated_observations_outbreak, {
sampled_simulated_observations_outbreak |>
split(by = c("scenario", "distribution", "sample_size", "data_type"))
})
#> Define target list_simulated_observations_outbreak from chunk code.
#> Establish _targets.R and _targets_r/targets/list_simulated_observations_outbreak.R.
- Get simulated outbreak scenarios
tar_target(simulated_scenarios_outbreak, {
sampled_simulated_observations_outbreak |>
DT(, .(scenario, distribution, sample_size, data_type)) |>
unique() |>
DT(, id := 1:.N)
})
#> Define target simulated_scenarios_outbreak from chunk code.
#> Establish _targets.R and _targets_r/targets/simulated_scenarios_outbreak.R.
- We simulate scenarios in which the incidence of the primary event changes exponentially, with growth rates ranging from -0.2 to 0.2 per day.
tar_target(growth_rate, {
data.table(
r = c(-0.2, -0.1, 0, 0.1, 0.2),
scenario = c("fast decay", "decay", "stable", "growth", "fast growth")
)
})
#> Define target growth_rate from chunk code.
#> Establish _targets.R and _targets_r/targets/growth_rate.R.
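A natural way to simulate primary events under exponentially changing incidence is inverse-CDF sampling from a density proportional to exp(r * t) on a fixed window. We sketch the idea below, assuming a window of [0, t_max] and uniform sampling when r = 0; simulate_exponential_cases() may differ in detail,
# Sketch: sample event times with density proportional to exp(r * t)
sketch_exponential_cases <- function(n = 1000, r = 0.2, t_max = 30) {
  u <- runif(n)
  if (abs(r) < 1e-10) {
    ptime <- u * t_max  # r = 0 reduces to uniform incidence
  } else {
    ptime <- log(1 + u * (exp(r * t_max) - 1)) / r  # inverse CDF
  }
  data.table::data.table(case = seq_len(n), ptime = ptime)
}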
- Save growth rate scenarios.
tar_file(
save_growth_rate,
save_csv(growth_rate, "growth_rates.csv", path = "data/meta")
)
#> Establish _targets.R and _targets_r/targets/save_growth_rate.R.
- Simulate data.
tar_target(
simulated_cases_exponential,
simulate_exponential_cases(
r = growth_rate[,"r"][[1]]
) |>
DT(, r := growth_rate[,"r"][[1]]) |>
DT(, scenario := growth_rate[,"scenario"][[1]]),
pattern = map(growth_rate)
)
#> Establish _targets.R and _targets_r/targets/simulated_cases_exponential.R.
- Simulate observations of primary and secondary events as linelist for each distribution scenario.
tar_target(
simulated_secondary_exponential,
simulated_cases_exponential |>
simulate_secondary(
meanlog = distributions[, "meanlog"][[1]],
sdlog = distributions[, "sdlog"][[1]]
) |>
DT(, distribution := distributions[, "scenario"][[1]]),
pattern = map(distributions)
)
#> Establish _targets.R and _targets_r/targets/simulated_secondary_exponential.R.
- Simulate the observation process
tar_target(simulated_observations_exponential, {
simulated_secondary_exponential |>
observe_process()
})
#> Define target simulated_observations_exponential from chunk code.
#> Establish _targets.R and _targets_r/targets/simulated_observations_exponential.R.
- Save the exponential simulation
tar_file(
save_exponential_data,
save_csv(
simulated_observations_exponential, "exponential-simulation.csv",
path = "data/scenarios"
)
)
#> Establish _targets.R and _targets_r/targets/save_exponential_data.R.
- For the exponential simulation, we truncate at t = 30.
tar_target(
truncated_sim_obs_exponential,
simulated_observations_exponential |>
filter_obs_by_obs_time(obs_time = 30) |>
DT(, estimation_time := 30)
)
#> Establish _targets.R and _targets_r/targets/truncated_sim_obs_exponential.R.
tar_group_by(
group_sim_obs_exponential,
truncated_sim_obs_exponential,
scenario, distribution
)
#> Establish _targets.R and _targets_r/targets/group_sim_obs_exponential.R.
- Retrospective incidence
tar_target(retro_exponential_incidence, {
simulated_observations_exponential |>
filter_obs_by_ptime(
obs_time = 30,
obs_at = "max_secondary"
) |>
event_to_incidence(by = c("r", "scenario", "distribution"))
})
#> Define target retro_exponential_incidence from chunk code.
#> Establish _targets.R and _targets_r/targets/retro_exponential_incidence.R.
- Number of replicate observation processes
tar_target(replicates_exponential, {
1:20
})
#> Define target replicates_exponential from chunk code.
#> Establish _targets.R and _targets_r/targets/replicates_exponential.R.
- Sample observations
tar_target(
sampled_simulated_observations_exponential,
group_sim_obs_exponential |>
as.data.table() |>
DT(sample(1:.N, min(.N, 200), replace = FALSE)) |>
DT(, sample_size := as.factor(200)) |>
DT(, data_type := "exponential") |>
DT(, replicate := replicates_exponential),
pattern = cross(
group_sim_obs_exponential, replicates_exponential
)
)
#> Establish _targets.R and _targets_r/targets/sampled_simulated_observations_exponential.R.
- Group and list unique scenarios for downstream modelling.
tar_target(list_simulated_observations_exponential, {
sampled_simulated_observations_exponential |>
split(
by = c("scenario", "distribution", "sample_size", "data_type", "replicate")
)
})
#> Define target list_simulated_observations_exponential from chunk code.
#> Establish _targets.R and _targets_r/targets/list_simulated_observations_exponential.R.
tar_target(simulated_scenarios_exponential, {
sampled_simulated_observations_exponential |>
DT(, .(scenario, distribution, sample_size, data_type, replicate)) |>
unique() |>
DT(, id := 1:.N)
})
#> Define target simulated_scenarios_exponential from chunk code.
#> Establish _targets.R and _targets_r/targets/simulated_scenarios_exponential.R.
- Case study using linelist data from “Transmission dynamics of Ebola virus disease and intervention effectiveness in Sierra Leone”. We download and save only confirmed cases. Note this was done manually due to the journal blocking automated downloads.
tar_target(raw_case_study_data, {
fread(here("data-raw", "pnas.1518587113.sd02.csv"))
})
#> Define target raw_case_study_data from chunk code.
#> Establish _targets.R and _targets_r/targets/raw_case_study_data.R.
- The data contain age, sex, symptom onset date, date of sample
testing, and the district and chiefdom of each case.
Here we convert these dates into the primary and secondary event times
dynamicaltruncation
requires, assuming daily censoring. This means we are estimating the delay between symptom onset and a sample being tested. As we are considering overall cases only, we keep just the dates and our newly created delay variables.
tar_target(case_study_data, {
raw_case_study_data |>
DT(,
.(
id = 1:.N, onset_date = lubridate::dmy(`Date of symptom onset`),
test_date = lubridate::dmy(`Date of sample tested`)
)
) |>
DT(, `:=`(
ptime = as.numeric(onset_date - min(onset_date)),
stime = as.numeric(test_date - min(onset_date))
)
) |>
observe_process()
})
#> Define target case_study_data from chunk code.
#> Establish _targets.R and _targets_r/targets/case_study_data.R.
- Save the processed data
tar_file(
save_case_study_data,
save_csv(
case_study_data, "ebola_case_study.csv", path = "data/scenarios"
)
)
#> Establish _targets.R and _targets_r/targets/save_case_study_data.R.
- For our Ebola case study, we estimate at 60, 120, 180, and 240 days from the first case’s symptom onset.
tar_group_by(
ebola_estimation_times,
data.table(
scenario = as.factor(c("60 days", "120 days", "180 days", "240 days")),
time = c(60, 120, 180, 240)
),
scenario
)
#> Establish _targets.R and _targets_r/targets/ebola_estimation_times.R.
- Truncate the available observations based on the estimation time for each scenario and the secondary event time. Restrict to a window up to 60 days before the estimation time using the primary event time.
tar_target(
truncated_ebola_obs,
case_study_data |>
filter_obs_by_obs_time(
obs_time = ebola_estimation_times[, "time"][[1]]
) |>
DT(, scenario := ebola_estimation_times[, "scenario"][[1]]) |>
DT(, obs_type := "real-time") |>
DT(ptime_lwr >= ebola_estimation_times[, "time"][[1]] - 60),
pattern = map(ebola_estimation_times)
)
#> Establish _targets.R and _targets_r/targets/truncated_ebola_obs.R.
- Create completely observed retrospective cohorts for the same estimation time windows.
tar_target(
retrospective_ebola_obs,
case_study_data |>
filter_obs_by_ptime(
obs_time = ebola_estimation_times[, "time"][[1]],
obs_at = "max_secondary"
) |>
DT(, scenario := ebola_estimation_times[, "scenario"][[1]]) |>
DT(, obs_type := "retrospective") |>
DT(ptime_lwr >= ebola_estimation_times[, "time"][[1]] - 60),
pattern = map(ebola_estimation_times)
)
#> Establish _targets.R and _targets_r/targets/retrospective_ebola_obs.R.
tar_group_by(
group_truncated_ebola_obs,
rbind(truncated_ebola_obs, retrospective_ebola_obs),
scenario, obs_type
)
#> Establish _targets.R and _targets_r/targets/group_truncated_ebola_obs.R.
- Retrospective incidence
tar_target(retro_ebola_incidence, {
event_to_incidence(retrospective_ebola_obs, by = "scenario")
})
#> Define target retro_ebola_incidence from chunk code.
#> Establish _targets.R and _targets_r/targets/retro_ebola_incidence.R.
- Add sample size and data type to observations
tar_target(
sampled_ebola_observations,
group_truncated_ebola_obs |>
as.data.table() |>
DT(, sample_size := .N) |>
DT(, data_type := "ebola_case_study"),
pattern = map(group_truncated_ebola_obs)
)
#> Establish _targets.R and _targets_r/targets/sampled_ebola_observations.R.
tar_target(list_ebola_observations, {
sampled_ebola_observations |>
split(by = c("scenario", "obs_type", "data_type"))
})
#> Define target list_ebola_observations from chunk code.
#> Establish _targets.R and _targets_r/targets/list_ebola_observations.R.
tar_target(ebola_scenarios, {
sampled_ebola_observations |>
DT(, .(scenario, obs_type, sample_size, data_type)) |>
unique() |>
DT(, id := 1:.N)
})
#> Define target ebola_scenarios from chunk code.
#> Establish _targets.R and _targets_r/targets/ebola_scenarios.R.
We explore a range of models for estimating the log-normal delay distribution, starting with the naive continuous model and gradually adding complexity to adjust for date censoring and right truncation.
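Schematically, the key adjustments can be written directly in terms of the log-normal density: right truncation conditions on the secondary event having been observed by the estimation time, while censoring integrates the density over the daily observation intervals. A minimal sketch of the truncation adjustment (illustrative only; the fitted models implement these adjustments in Stan via brms),
# Naive density of a delay y
naive_density <- function(y, meanlog, sdlog) {
  dlnorm(y, meanlog, sdlog)
}
# Right-truncation adjustment: condition on the secondary event having
# occurred by obs_t for a primary event at time p
truncation_adjusted_density <- function(y, obs_t, p, meanlog, sdlog) {
  dlnorm(y, meanlog, sdlog) / plnorm(obs_t - p, meanlog, sdlog)
}
The full set of model functions considered is defined below.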
models <- list(
"Latent variable truncation and censoring adjusted" =
quote(latent_truncation_censoring_adjusted_delay),
"Truncation and censoring adjusted" =
quote(truncation_censoring_adjusted_delay),
"Truncation adjusted" = quote(truncation_adjusted_delay),
"Censoring adjusted" = quote(censoring_adjusted_delay),
"Filtered" = quote(filtered_naive_delay),
"Filtered and censoring adjusted" = quote(filtered_censoring_adjusted_delay),
"Naive" = quote(naive_delay),
"Dynamical and censoring adjusted (real-time incidence)" = quote(dynamical_censoring_adjusted_delay_wrapper),
"Dynamical and censoring adjusted (retrospective incidence)" = quote(dynamical_censoring_adjusted_delay)
)
machine_model_names <- gsub(" ", "_", tolower(names(models)))
machine_model_names <- gsub("\\(", "", machine_model_names)
machine_model_names <- gsub("\\)", "", machine_model_names)
machine_model_names <- gsub("\\.", "_", machine_model_names)
machine_model_names <- gsub("-", "_", machine_model_names)
#> Establish _targets.R and _targets_r/globals/models.R.
- Save a look-up of model names.
tar_file(
save_models,
data.table(
model = names(models), in_code = machine_model_names
) |>
rbind(data.table(
model = "Joint incidence and delay estimation",
in_code = "epinowcast"
)) |>
save_csv("models.csv", path = "data/meta")
)
#> Establish _targets.R and _targets_r/targets/save_models.R.
- Combine simulated and case study scenarios and observations
tar_target(scenarios, {
rbind(
simulated_scenarios_outbreak[, replicate := 1],
simulated_scenarios_exponential,
ebola_scenarios,
fill = TRUE
) |>
as.data.table() |>
DT(, id := 1:.N)
})
#> Define target scenarios from chunk code.
#> Establish _targets.R and _targets_r/targets/scenarios.R.
- Save scenarios for postprocessing
tar_file(
save_scenarios,
save_csv(scenarios, "scenarios.csv", path = "data/meta")
)
#> Establish _targets.R and _targets_r/targets/save_scenarios.R.
- Make a list of observations to fit models for.
tar_target(list_observations, {
c(list_simulated_observations_outbreak,
list_simulated_observations_exponential,
list_ebola_observations
)
})
#> Define target list_observations from chunk code.
#> Establish _targets.R and _targets_r/targets/list_observations.R.
- Make a list of retrospective incidence estimates by observation strata.
tar_target(list_retro_incidence, {
rbindlist(list(
merge(
scenarios[data_type %in% "outbreak"], retro_outbreak_incidence,
by = c("scenario", "distribution"), allow.cartesian = TRUE
),
merge(
scenarios[data_type %in% "exponential"], retro_exponential_incidence,
by = c("scenario", "distribution"), allow.cartesian = TRUE
),
merge(
scenarios[data_type %in% "ebola_case_study"], retro_ebola_incidence,
by = c("scenario"), allow.cartesian = TRUE
)),
fill = TRUE
) |>
setorder("id") |>
DT(, .(id, time, cases)) |>
split(by = "id")
})
#> Define target list_retro_incidence from chunk code.
#> Establish _targets.R and _targets_r/targets/list_retro_incidence.R.
- Dummy data required for model creation.
dummy_obs <- data.table::data.table(
ptime = 1, stime = 2, delay_daily = 1, delay_lwr = 1, delay_upr = 2,
ptime_lwr = 1, ptime_upr = 2, stime_lwr = 1, stime_upr = 2, obs_at = 100,
censored = "interval", censored_obs_time = 10, ptime_daily = 1,
stime_daily = 1
)
#> Establish _targets.R and _targets_r/globals/dummy_obs.R.
- Iterate over compiled models and all scenarios being investigated.
For each model:
- Create a model file
- Generate stan code
- Save the model to file
- Compile the model
- Generate stan data for each scenario
- Fit the model to each scenario
- Extract posterior samples for the parameters of interest
- Summarise the posterior parameters of interest
- Combine posterior samples and summaries with the scenarios they are linked to.
- Summarise the model run time and other diagnostics by scenario.
- Save posterior draws and model diagnostics
tar_map(
values = list(
model_name = machine_model_names,
model = models
),
names = model_name,
tar_file(
model_path,
paste0(
"data/models/", model_name, ".stan"
) |>
fs::file_create()
),
tar_target(
model_stan_code,
do.call(
model,
list(
data = dummy_obs, fn = brms::make_stancode, save_model = model_path)
)
),
tar_file(
compiled_model_path,
{
stan_code <- model_stan_code
cmdstan_model(model_path)$stan_file()
}
),
tar_target(
standata,
do.call(
model,
list(
data = list_observations[[1]], fn = brms::make_standata,
data_cases = list_retro_incidence[[1]]
)
),
pattern = map(list_observations, list_retro_incidence)
),
tar_target(
fit,
sample_model(
model = compiled_model_path,
data = standata,
scenario = scenarios,
adapt_delta = 0.95,
parallel_chains = parallel_chains,
refresh = 0,
show_messages = FALSE,
iter_sampling = 1000,
seed = 123
),
pattern = map(standata, scenarios),
deployment = "worker"
),
tar_file(
save_diagnostics,
save_csv(
fit[, -c("fit")], paste0(model_name, ".csv"), path = "data/diagnostics"
)
),
tar_target(
draws,
fit |>
extract_lognormal_draws(scenarios, from_dt = TRUE),
pattern = map(fit, scenarios)
),
tar_file(
save_lognormal_draws,
save_dataset(
draws, path = paste0("data/posteriors/", model_name),
partitioning = "id"
)
),
tar_target(
summarised_draws,
draws |>
draws_to_long() |>
summarise_draws(sf = 2)
),
tar_file(
save_summarised_draws,
save_csv(
summarised_draws, paste0(model_name, ".csv"),
path = "data/summarised_posteriors"
)
)
)
#> Establish _targets.R and _targets_r/targets/fit_models.R.
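The saved posterior draws can later be read back from the partitioned datasets written by save_dataset(); for example, assuming the pipeline has been run and that save_dataset() writes an arrow dataset (as the arrow import above suggests),
library(arrow)
library(data.table)
# Read posterior draws for one model back in
naive_draws <- open_dataset("data/posteriors/naive") |>
  dplyr::collect() |>
  as.data.table()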
- Repeat the same steps using the epinowcast wrapper (which requires a slightly different setup and so can’t be included in the above workflow).
list(
tar_file(
epinowcast_path,
paste0(
"data/models/", "epinowcast", ".stan"
) |>
fs::file_create()
),
tar_file(
epinowcast_compiled_model_path,
{
stan_path <- epinowcast_path
stan_code <- epinowcast::enw_model(
target_dir = here::here("data/models")
)
stan_code$stan_file()
}
),
tar_target(
epinowcast_fit,
epinowcast_delay(
model = epinowcast_compiled_model_path,
data = list_observations[[1]],
max_delay = 30,
scenario = scenarios,
adapt_delta = 0.95,
parallel_chains = parallel_chains,
refresh = 0,
show_messages = FALSE,
iter_sampling = 1000,
seed = 123,
sampler = sample_epinowcast_model,
with_epinowcast_output = FALSE
),
pattern = map(list_observations, scenarios),
deployment = "worker"
),
tar_file(
epinowcast_save_diagnostics,
epinowcast_fit |>
DT(, -c("fit")) |>
save_csv("epinowcast.csv", path = "data/diagnostics")
),
tar_target(
epinowcast_draws,
epinowcast_fit |>
extract_epinowcast_draws(scenarios, from_dt = TRUE) |>
primary_censoring_bias_correction(),
pattern = map(epinowcast_fit, scenarios)
),
tar_file(
epinowcast_save_lognormal_draws,
save_dataset(
epinowcast_draws, path = paste0("data/posteriors/", "epinowcast"),
partitioning = "id"
)
),
tar_target(
epinowcast_summarised_draws,
epinowcast_draws |>
draws_to_long() |>
summarise_draws(sf = 2)
),
tar_file(
epinowcast_save_summarised_draws,
save_csv(
epinowcast_summarised_draws, "epinowcast.csv",
path = "data/summarised_posteriors"
)
)
)
#> Establish _targets.R and _targets_r/targets/fit_epinowcast.R.