From 3f15632edc75cd0fa2d5a5fd1ed51c2311a6af22 Mon Sep 17 00:00:00 2001 From: Chris Black Date: Tue, 16 Jul 2024 07:00:16 -0700 Subject: [PATCH 1/8] fixes for new check errors in R >= 4.4.0 --- base/db/tests/Rcheck_reference.log | 3 +- base/qaqc/tests/Rcheck_reference.log | 1 + base/remote/tests/Rcheck_reference.log | 3 +- base/settings/R/check.all.settings.R | 5 +- base/settings/man/check.workflow.settings.Rd | 2 + base/settings/tests/Rcheck_reference.log | 36 +----- base/utils/tests/Rcheck_reference.log | 3 +- base/visualization/DESCRIPTION | 2 + base/visualization/R/plots.R | 2 +- base/visualization/man/theme_border.Rd | 2 +- base/visualization/tests/Rcheck_reference.log | 3 +- base/visualization/vignettes/usmap.Rmd | 20 ++- base/workflow/tests/Rcheck_reference.log | 2 +- docker/depends/pecan_package_dependencies.csv | 1 + modules/allometry/R/AllomAve.R | 29 +++-- modules/allometry/R/allom.BayesFit.R | 46 ++++--- modules/allometry/R/allom.predict.R | 25 ++-- modules/allometry/R/query.allom.data.R | 25 ++-- modules/allometry/R/read.allom.data.R | 12 +- modules/allometry/man/AllomAve.Rd | 16 +-- modules/allometry/man/allom.BayesFit.Rd | 30 ++--- modules/allometry/man/allom.predict.Rd | 8 +- modules/allometry/man/load.allom.Rd | 2 +- modules/allometry/man/query.allom.data.Rd | 6 +- modules/allometry/man/read.allom.data.Rd | 2 +- modules/allometry/tests/Rcheck_reference.log | 3 +- .../assim.batch/tests/Rcheck_reference.log | 3 +- modules/assim.sequential/R/Analysis_sda.R | 1 + modules/assim.sequential/R/GEF_Helper.R | 3 +- .../R/Multi_Site_Constructors.R | 3 + modules/assim.sequential/R/Remote_helpers.R | 1 + modules/assim.sequential/R/hop_test.R | 1 + .../assim.sequential/R/load_data_paleon_sda.R | 2 +- modules/assim.sequential/R/sda.enkf.R | 14 +-- .../assim.sequential/R/sda.enkf_MultiSite.R | 19 +-- .../assim.sequential/R/sda.enkf_refactored.R | 38 ++++-- modules/assim.sequential/R/sda_plotting.R | 3 + modules/assim.sequential/man/Contruct.Pf.Rd | 6 + 
modules/assim.sequential/man/GEF.Rd | 2 + .../man/SDA_remote_launcher.Rd | 2 + modules/assim.sequential/man/hop_test.Rd | 2 + .../man/interactive.plotting.sda.Rd | 6 + modules/assim.sequential/man/sda.enkf.Rd | 51 ++++---- .../man/sda.enkf.multisite.Rd | 14 ++- .../man/tobit_model_censored.Rd | 5 + .../tests/Rcheck_reference.log | 10 +- modules/benchmark/tests/Rcheck_reference.log | 3 +- modules/data.atmosphere/DESCRIPTION | 2 + modules/data.atmosphere/R/closest_xy.R | 7 +- .../data.atmosphere/R/download.Ameriflux.R | 1 + .../data.atmosphere/R/download.AmerifluxLBL.R | 1 + modules/data.atmosphere/R/download.ERA5.R | 2 + .../data.atmosphere/R/download.Fluxnet2015.R | 2 + .../R/download.FluxnetLaThuile.R | 1 + modules/data.atmosphere/R/download.GFDL.R | 2 + .../data.atmosphere/R/download.NARR_site.R | 5 +- modules/data.atmosphere/R/download.NEONmet.R | 2 + modules/data.atmosphere/R/download.PalEON.R | 4 +- modules/data.atmosphere/R/extract.nc.R | 2 + modules/data.atmosphere/R/extract_ERA5.R | 9 +- modules/data.atmosphere/R/lightME.R | 2 +- .../data.atmosphere/R/merge.met.variable.R | 15 ++- modules/data.atmosphere/R/met.process.R | 2 + modules/data.atmosphere/R/met2CF.ALMA.R | 6 + modules/data.atmosphere/R/met2CF.Ameriflux.R | 1 + .../data.atmosphere/R/met2CF.AmerifluxLBL.R | 1 + .../data.atmosphere/R/metgapfill.NOAA_GEFS.R | 34 +++--- modules/data.atmosphere/R/metgapfill.R | 2 + modules/data.atmosphere/R/nc_merge.R | 2 + modules/data.atmosphere/R/permute.nc.R | 1 + modules/data.atmosphere/R/split_wind.R | 6 +- .../R/tdm_generate_subdaily_models.R | 80 ++++++------ .../data.atmosphere/R/tdm_lm_ensemble_sims.R | 1 + modules/data.atmosphere/R/tdm_model_train.R | 3 + .../R/tdm_predict_subdaily_met.R | 2 + .../R/tdm_temporal_downscale_functions.R | 2 + modules/data.atmosphere/man/closest_xy.Rd | 11 +- modules/data.atmosphere/man/daygroup.Rd | 11 -- .../data.atmosphere/man/download.Ameriflux.Rd | 2 + .../man/download.AmerifluxLBL.Rd | 2 + 
.../data.atmosphere/man/download.ERA5.old.Rd | 30 ++--- .../man/download.Fluxnet2015.Rd | 4 + .../man/download.FluxnetLaThuile.Rd | 2 + modules/data.atmosphere/man/download.GFDL.Rd | 2 + .../data.atmosphere/man/download.NARR_site.Rd | 5 + .../data.atmosphere/man/download.NEONmet.Rd | 2 + .../data.atmosphere/man/extract.nc.ERA5.Rd | 9 +- modules/data.atmosphere/man/extract.nc.Rd | 2 + .../man/gen.subdaily.models.Rd | 9 +- modules/data.atmosphere/man/lightME.Rd | 2 +- .../data.atmosphere/man/lm_ensemble_sims.Rd | 2 + .../data.atmosphere/man/merge_met_variable.Rd | 9 +- modules/data.atmosphere/man/met.process.Rd | 3 + modules/data.atmosphere/man/met2CF.ALMA.Rd | 2 + .../data.atmosphere/man/met2CF.Ameriflux.Rd | 2 + .../man/met2CF.AmerifluxLBL.Rd | 2 + modules/data.atmosphere/man/met2CF.PalEON.Rd | 6 + .../man/met2CF.PalEONregional.Rd | 4 + .../man/metgapfill.NOAA_GEFS.Rd | 14 +-- modules/data.atmosphere/man/metgapfill.Rd | 2 + modules/data.atmosphere/man/model.train.Rd | 4 + modules/data.atmosphere/man/nc.merge.Rd | 2 + modules/data.atmosphere/man/permute.nc.Rd | 2 + .../man/predict_subdaily_met.Rd | 2 + modules/data.atmosphere/man/split_wind.Rd | 7 +- .../man/temporal.downscale.functions.Rd | 2 + .../tests/Rcheck_reference.log | 114 ++---------------- .../vignettes/ameriflux_demo.Rmd | 19 +-- .../vignettes/cfmet_downscaling.Rmd | 16 ++- .../vignettes/compare_narr_cruncep_met.Rmd | 48 +++++--- .../vignettes/tdm_downscaling.Rmd | 17 ++- 111 files changed, 560 insertions(+), 490 deletions(-) delete mode 100644 modules/data.atmosphere/man/daygroup.Rd diff --git a/base/db/tests/Rcheck_reference.log b/base/db/tests/Rcheck_reference.log index cbc3757f683..87ade55021c 100644 --- a/base/db/tests/Rcheck_reference.log +++ b/base/db/tests/Rcheck_reference.log @@ -62,7 +62,8 @@ The Date field is over a month old. * checking package directory ... OK * checking for future file timestamps ... OK * checking ‘build’ directory ... OK -* checking DESCRIPTION meta-information ... 
OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/base/qaqc/tests/Rcheck_reference.log b/base/qaqc/tests/Rcheck_reference.log index dcbf435c84f..593c738f659 100644 --- a/base/qaqc/tests/Rcheck_reference.log +++ b/base/qaqc/tests/Rcheck_reference.log @@ -24,6 +24,7 @@ Malformed Description field: should contain one or more complete sentences. Authors@R field gives no person with name and roles. Authors@R field gives no person with maintainer role, valid email address and non-empty name. +License stub is invalid DCF. * checking top-level files ... NOTE Non-standard file/directory found at top level: ‘README.Rmd’ diff --git a/base/remote/tests/Rcheck_reference.log b/base/remote/tests/Rcheck_reference.log index 71001fb962e..ca6f1d67b77 100644 --- a/base/remote/tests/Rcheck_reference.log +++ b/base/remote/tests/Rcheck_reference.log @@ -19,7 +19,8 @@ * checking whether package ‘PEcAn.remote’ can be installed ... OK * checking installed package size ... OK * checking package directory ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... 
OK diff --git a/base/settings/R/check.all.settings.R b/base/settings/R/check.all.settings.R index c7b665addca..6b6a20b3998 100644 --- a/base/settings/R/check.all.settings.R +++ b/base/settings/R/check.all.settings.R @@ -936,9 +936,10 @@ check.model.settings <- function(settings, dbcon = NULL) { return(settings) } -#' @title Check Workflow Settings +#' Check Workflow Settings #' @param settings settings file -#' @export check.workflow.settings +#' @param dbcon database connection +#' @export check.workflow.settings <- function(settings, dbcon = NULL) { # check for workflow defaults fixoutdir <- FALSE diff --git a/base/settings/man/check.workflow.settings.Rd b/base/settings/man/check.workflow.settings.Rd index 00416872adf..edf68041661 100644 --- a/base/settings/man/check.workflow.settings.Rd +++ b/base/settings/man/check.workflow.settings.Rd @@ -8,6 +8,8 @@ check.workflow.settings(settings, dbcon = NULL) } \arguments{ \item{settings}{settings file} + +\item{dbcon}{database connection} } \description{ Check Workflow Settings diff --git a/base/settings/tests/Rcheck_reference.log b/base/settings/tests/Rcheck_reference.log index e981f489931..8dcb48578fa 100644 --- a/base/settings/tests/Rcheck_reference.log +++ b/base/settings/tests/Rcheck_reference.log @@ -73,7 +73,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... NOTE Non-standard file/directory found at top level: ‘examples’ @@ -140,36 +141,7 @@ All user-level objects in a package should have documentation entries. See chapter ‘Writing R documentation files’ in the ‘Writing R Extensions’ manual. * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... 
WARNING -Undocumented arguments in documentation object 'addSecrets' - ‘force’ - -Undocumented arguments in documentation object 'check.model.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'check.run.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'check.settings' - ‘force’ - -Undocumented arguments in documentation object 'check.workflow.settings' - ‘dbcon’ - -Undocumented arguments in documentation object 'clean.settings' - ‘write’ - -Undocumented arguments in documentation object 'fix.deprecated.settings' - ‘force’ - -Undocumented arguments in documentation object 'update.settings' - ‘force’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. +* checking Rd \usage sections ... OK * checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking examples ... OK @@ -179,4 +151,4 @@ Extensions’ manual. * checking for detritus in the temp directory ... OK * DONE -Status: 5 WARNINGs, 3 NOTEs +Status: 3 WARNINGs, 2 NOTEs diff --git a/base/utils/tests/Rcheck_reference.log b/base/utils/tests/Rcheck_reference.log index 087e10096eb..ac9acddcb22 100644 --- a/base/utils/tests/Rcheck_reference.log +++ b/base/utils/tests/Rcheck_reference.log @@ -71,7 +71,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... 
OK diff --git a/base/visualization/DESCRIPTION b/base/visualization/DESCRIPTION index 43704b5fb15..c7f735f435f 100644 --- a/base/visualization/DESCRIPTION +++ b/base/visualization/DESCRIPTION @@ -38,6 +38,7 @@ Imports: stringr(>= 1.1.0) Suggests: grid, + knitr, mockery, png, raster, @@ -49,5 +50,6 @@ Copyright: Authors LazyLoad: yes LazyData: FALSE Encoding: UTF-8 +VignetteBuilder: knitr RoxygenNote: 7.3.2 Roxygen: list(markdown = TRUE) diff --git a/base/visualization/R/plots.R b/base/visualization/R/plots.R index 3f1395305bc..85b6534142f 100644 --- a/base/visualization/R/plots.R +++ b/base/visualization/R/plots.R @@ -263,7 +263,7 @@ plot_data <- function(trait.data, base.plot = NULL, ymax) { ##' ##' @return adds borders to ggplot as a side effect ##' @author Rudolf Cardinal -##' @author \url{ggplot2 google group}{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE} +##' @author [ggplot2 google group](https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE) ##' @examples ##' \dontrun{ ##' df = data.frame( x=c(1,2,3), y=c(4,5,6) ) diff --git a/base/visualization/man/theme_border.Rd b/base/visualization/man/theme_border.Rd index cc77306dc9b..a003e9ffea4 100644 --- a/base/visualization/man/theme_border.Rd +++ b/base/visualization/man/theme_border.Rd @@ -47,5 +47,5 @@ ggplot(data=df, aes(x=x, y=y)) + geom_point() + theme_bw() + \author{ Rudolf Cardinal -\url{ggplot2 google group}{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE} +\href{https://groups.google.com/forum/?fromgroups#!topic/ggplot2/-ZjRE2OL8lE}{ggplot2 google group} } diff --git a/base/visualization/tests/Rcheck_reference.log b/base/visualization/tests/Rcheck_reference.log index a578ea6cf0c..0c65006916e 100644 --- a/base/visualization/tests/Rcheck_reference.log +++ b/base/visualization/tests/Rcheck_reference.log @@ -71,7 +71,8 @@ The Date field is over a month old. * checking installed package size ... OK * checking package directory ... 
OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/base/visualization/vignettes/usmap.Rmd b/base/visualization/vignettes/usmap.Rmd index dff4e2b9ab4..c7d9a9ae0b3 100644 --- a/base/visualization/vignettes/usmap.Rmd +++ b/base/visualization/vignettes/usmap.Rmd @@ -1,7 +1,19 @@ +--- +title: "Maps" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Maps} + %\VignetteEngine{knitr::rmarkdown} +--- + + + Map ======================================================== -```{r} +(all code chunks are disabled because vignette build was throwing errors. TODO: debug and re-enable.) + +```{r,eval=FALSE} require(raster) require(sp) require(ggplot2) @@ -20,7 +32,7 @@ spplot(spdf) ### Plot all maps for BETYdb -```{r} +```{r,eval=FALSE} files <- dir("~/dev/bety/local/modelout", pattern="grid.csv", full.names=TRUE) yieldfiles <- files[!grepl("evapotranspiration", files)] etfiles <- files[grepl("evapotranspiration", files)] @@ -42,7 +54,7 @@ for(file in etfiles){ ``` ### Misc additional code -```{r} +```{r,eval=FALSE} # Make an evenly spaced raster, the same extent as original data e <- extent( spdf ) @@ -63,7 +75,7 @@ ggplot( NULL ) + geom_raster( data = rdf , aes( x , y , fill = layer ) ) ``` -```{r} +```{r,eval=FALSE} # from http://gis.stackexchange.com/a/20052/3218 require(rgdal) proj4string(spdf) <- CRS("+init=epsg:4326") diff --git a/base/workflow/tests/Rcheck_reference.log b/base/workflow/tests/Rcheck_reference.log index 3d3e30ec6d6..f6fb0254800 100644 --- a/base/workflow/tests/Rcheck_reference.log +++ b/base/workflow/tests/Rcheck_reference.log @@ -70,7 +70,7 @@ Author field differs from that derived from Authors@R Maintainer field differs from that derived from Authors@R Maintainer: ‘David LeBauer ’ Authors@R: ‘David LeBauer ’ - 
+License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/docker/depends/pecan_package_dependencies.csv b/docker/depends/pecan_package_dependencies.csv index f8894581945..54bde494f59 100644 --- a/docker/depends/pecan_package_dependencies.csv +++ b/docker/depends/pecan_package_dependencies.csv @@ -122,6 +122,7 @@ "jsonlite","*","models/stics","Imports",FALSE "jsonlite","*","modules/data.atmosphere","Imports",FALSE "jsonlite","*","modules/data.remote","Suggests",FALSE +"knitr","*","base/visualization","Suggests",FALSE "knitr",">= 1.42","base/db","Suggests",FALSE "knitr",">= 1.42","base/qaqc","Suggests",FALSE "knitr",">= 1.42","modules/allometry","Suggests",FALSE diff --git a/modules/allometry/R/AllomAve.R b/modules/allometry/R/AllomAve.R index 47ea7354891..f6aba498caf 100644 --- a/modules/allometry/R/AllomAve.R +++ b/modules/allometry/R/AllomAve.R @@ -7,16 +7,27 @@ # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -#' @title AllomAve -#' @name AllomAve -#' @aliases AllomAve +#' AllomAve +#' +#' Allometery wrapper function that handles loading and subsetting the data, +#' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over +#' multiple PFTs and components. +#' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. +#' +#' There are two usages of this function. +#' When running 'online' (connected to the PEcAn database), pass the database connection, +#' con, and the pfts subsection of the PEcAn settings. 
+#' When running 'stand alone' pass the pft list mapping species to species codes +#' and the file paths to the allometry table and field data (optional) +#' #' @param pfts pft list from PEcAn settings (if con) OR list of pft spcd's #' If the latter, the names within the list are used to identify PFTs -#' \itemize{ +#' \describe{ #' \item{'acronym'}{ - USDA species acronyms (see plants.usda.gov), used with FIELD data (vector)} #' \item{'spcd'}{ - USFS species codes, use with PARM data (vector)} #' } -#' @param components IDs for allometry components from Jenkins et al 2004 Table 5. Default is stem biomass (6). See data(allom.components) +#' @param components IDs for allometry components from Jenkins et al 2004 Table 5. +#' Default is stem biomass (6). See data(allom.components) #' @param outdir output directory files are written to. Default is getwd() #' @param con database connection #' @param field path(s) to raw data files @@ -27,14 +38,6 @@ #' @param dmax maximum dbh of interest #' @return nested list of parameter summary statistics #' @export -#' @description allometery wrapper function that handles loading and subsetting the data, -#' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over -#' multiple PFTs and components. -#' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. -#' -#' @details There are two usages of this function. -#' When running 'online' (connected to the PEcAn database), pass the database connection, con, and the pfts subsection of the PEcAn settings. 
-#' When running 'stand alone' pass the pft list mapping species to species codes and the file paths to the allometry table and field data (optional) #' #' @examples #' diff --git a/modules/allometry/R/allom.BayesFit.R b/modules/allometry/R/allom.BayesFit.R index beeb35ea1d4..7ddc1011ac2 100644 --- a/modules/allometry/R/allom.BayesFit.R +++ b/modules/allometry/R/allom.BayesFit.R @@ -7,35 +7,38 @@ # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -#' @title allom.BayesFit -#' @name allom.BayesFit -#' @aliases allom.BayesFit +#' allom.BayesFit #' -#' @description Module to fit a common power-law allometric model +#' Module to fit a common power-law allometric model #' to a mixture of raw data and allometric equations #' in a Heirarchical Bayes framework with multiple imputation #' of the allometric data #' +#' dependencies: requires MCMCpack and mvtnorm +#' +#' note: runs 1 chain, but multiple chains can be simulated by +#' multiple function calls +#' #' @param allom - object (usually generated by query.allom.data) which #' needs to be a list with two entries: #' 'field' - contains a list, each entry for which is #' a data frame with 'x' and 'y'. 
Can be NULL #' 'parm' - a single data frame with the following components: -#' \itemize{ -#' \item{n} {sample size} -#' \item{a} {eqn coefficient} -#' \item{b} {eqn coefficient} -#' \item{c} {eqn coefficient} -#' \item{d} {eqn coefficient} -#' \item{e} {eqn coefficient} -#' \item{se} {standard error} -#' \item{eqn} {sample size} -#' \item{Xmin} {smallest tree sampled (cm)} -#' \item{Xmax} {largest tree sampled (cm)} -#' \item{Xcor} {units correction on X} -#' \item{Ycor} {units correction on Y} -#' \item{Xtype} {type of measurement on the X} -#' \item{spp} { - USFS species code} +#' \describe{ +#' \item{n}{sample size} +#' \item{a}{eqn coefficient} +#' \item{b}{eqn coefficient} +#' \item{c}{eqn coefficient} +#' \item{d}{eqn coefficient} +#' \item{e}{eqn coefficient} +#' \item{se}{standard error} +#' \item{eqn}{sample size} +#' \item{Xmin}{smallest tree sampled (cm)} +#' \item{Xmax}{largest tree sampled (cm)} +#' \item{Xcor}{units correction on X} +#' \item{Ycor}{units correction on Y} +#' \item{Xtype}{type of measurement on the X} +#' \item{spp}{ - USFS species code} #' } #' @param nrep - number of MCMC replicates #' @@ -43,11 +46,6 @@ #' @param dmin minimum dbh of interest #' @param dmax maximum dbh of interest - -#' @details dependencies: requires MCMCpack and mvtnorm -#' -#' note: runs 1 chain, but multiple chains can be simulated by -#' multiple function calls #' #' @return returns MCMC chain and ONE instance of 'data' #' note: in many cases the estimates are multiply imputed diff --git a/modules/allometry/R/allom.predict.R b/modules/allometry/R/allom.predict.R index 16f6210919f..5dc032caa9f 100644 --- a/modules/allometry/R/allom.predict.R +++ b/modules/allometry/R/allom.predict.R @@ -7,27 +7,29 @@ # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -#' @title allom.predict -#' @name allom.predict -#' @aliases allom.predict +#' allom.predict +#' +#' Function for making tree-level 
Monte Carlo predictions +#' from allometric equations estimated from the PEcAn allometry module #' #' @param object Allometry model object. Option includes -#'\itemize{ +#'\describe{ #' \item{'list of mcmc'}{ - mcmc outputs in a list by PFT then component} #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } #' } #' @param dbh Diameter at Breast Height (cm) -#' @param pft Plant Functional Type. Needs to match the name used in AllomAve. Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh +#' @param pft Plant Functional Type. Needs to match the name used in AllomAve. +#' Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh #' @param component Which component to predict. Can be NULL if only one component was analysed in AllomAve. #' @param n Number of Monte Carlo samples. Defaults to the same number as in the MCMC object #' @param use c('Bg','mu','best') #' @param interval c('none','confidence','prediction') default is prediction +#' @param single.tree logical: Is this a DBH time series from one indidual tree? +#' If TRUE, will use a fixed error for all draws. #' #' @return matrix of Monte Carlo predictions that has n rows and one column per DBH #' -#' @description Function for making tree-level Monte Carlo predictions -#' from allometric equations estimated from the PEcAn allometry module #' #' @examples #' @@ -240,19 +242,18 @@ allom.predict <- function(object, dbh, pft = NULL, component = NULL, n = NULL, u return(out) } # allom.predict -#' @title load.allom -#' @name load.allom +#' load.allom +#' +#' loads allom files #' #' @param object Allometry model object. 
Option includes -#'\itemize{ +#'\describe{ #' \item{'vector of file paths'}{ - path(s) to AllomAve RData files} #' \item{'directory where files are located}{ - } #' } #' #' @return mcmc outputs in a list by PFT then component #' -#' @description loads allom files -#' #' @examples #' #' \dontrun{ diff --git a/modules/allometry/R/query.allom.data.R b/modules/allometry/R/query.allom.data.R index f5942adf8e2..8234e0b2dfd 100644 --- a/modules/allometry/R/query.allom.data.R +++ b/modules/allometry/R/query.allom.data.R @@ -7,15 +7,16 @@ # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -#' @title query.allom.data -#' @name query.allom.data -#' @description +#' query.allom.data +#' #' Module to grab allometric information from the raw data table #' Will grab both original field data and tallied equations #' #' Tallied equation format based on Jenkins et al 2004 USFS #' General Technical Report NE-319 #' +#' database is assumed to conform to the PEcAn Schema +#' #' @author Michael Dietze #' #' @param pft_name name of Plant Functional Type to be queried @@ -23,7 +24,6 @@ #' @param con open database connection #' @param nsim number of pseudo-data simulations for estimating SE #' -#' @details database is assumed to conform to the PEcAn Schema query.allom.data <- function(pft_name, variable, con, nsim = 10000) { ## check validity of inputs @@ -80,21 +80,22 @@ query.allom.data <- function(pft_name, variable, con, nsim = 10000) { return(allom) } # query.allom.data -#' @title nu -#' @name nu +#' nu +#' +#' converts factors to numeric +#' #' @param x data -#' @description converts factors to numeric nu <- function(x) { as.numeric(as.character(x)) } # nu -#' @title AllomUnitCoef -#' @name AllomUnitCoef -#' @param x units: mm, cm, cm2, m, in, g, kg, lb, Mg -#' @param tp diameter type, leave NULL if DBH. 
Options: 'd.b.h.^2','cbh','crc' -#' @description +#' AllomUnitCoef +#' #' converts length units FROM cm TO specified units #' converts mass units TO kg FROM specificed units +#' +#' @param x units: mm, cm, cm2, m, in, g, kg, lb, Mg +#' @param tp diameter type, leave NULL if DBH. Options: 'd.b.h.^2','cbh','crc' AllomUnitCoef <- function(x, tp = NULL) { y <- rep(1, length(x)) diff --git a/modules/allometry/R/read.allom.data.R b/modules/allometry/R/read.allom.data.R index 004fe88ebb1..09c560d1c63 100644 --- a/modules/allometry/R/read.allom.data.R +++ b/modules/allometry/R/read.allom.data.R @@ -7,13 +7,15 @@ # http://opensource.ncsa.illinois.edu/license.html #------------------------------------------------------------------------------- -#' @title read.allom.data -#' @name read.allom.data +#' read.allom.data #' -#' @description Extracts PFT- and component-specific data and allometeric equations from the specified files. +#' Extracts PFT- and component-specific data and allometeric equations from the specified files. #' +#' This code also estimates the standard error from R-squared, +#' which is required to simulate pseudodata from the allometric eqns. +#' #' @param pft.data PFT dataframe -#' \itemize{ +#' \describe{ #' \item{acronym}{USDA species acronyms, used with FIELD data (vector)} #' \item{spcd}{USFS species codes, use with TALLY data (vector)} #' } @@ -23,8 +25,6 @@ #' @param nsim number of Monte Carlo draws in numerical transforms #' @return \item{field}{PFT-filtered field Data} #' \item{parm}{Component- and PFT-filtered Allometric Equations} -#' @details This code also estimates the standard error from R-squared, -#' which is required to simulate pseudodata from the allometric eqns. 
read.allom.data <- function(pft.data, component, field, parm, nsim = 10000) { allom <- list(parm = NULL, field = NULL) diff --git a/modules/allometry/man/AllomAve.Rd b/modules/allometry/man/AllomAve.Rd index f3066a2f276..8daa36d9453 100644 --- a/modules/allometry/man/AllomAve.Rd +++ b/modules/allometry/man/AllomAve.Rd @@ -20,12 +20,13 @@ AllomAve( \arguments{ \item{pfts}{pft list from PEcAn settings (if con) OR list of pft spcd's If the latter, the names within the list are used to identify PFTs -\itemize{ +\describe{ \item{'acronym'}{ - USDA species acronyms (see plants.usda.gov), used with FIELD data (vector)} \item{'spcd'}{ - USFS species codes, use with PARM data (vector)} }} -\item{components}{IDs for allometry components from Jenkins et al 2004 Table 5. Default is stem biomass (6). See data(allom.components)} +\item{components}{IDs for allometry components from Jenkins et al 2004 Table 5. +Default is stem biomass (6). See data(allom.components)} \item{outdir}{output directory files are written to. Default is getwd()} @@ -47,15 +48,16 @@ If the latter, the names within the list are used to identify PFTs nested list of parameter summary statistics } \description{ -allometery wrapper function that handles loading and subsetting the data, +Allometery wrapper function that handles loading and subsetting the data, fitting the Bayesian models, and generating diagnostic figures. Set up to loop over multiple PFTs and components. Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. -} -\details{ + There are two usages of this function. -When running 'online' (connected to the PEcAn database), pass the database connection, con, and the pfts subsection of the PEcAn settings. 
-When running 'stand alone' pass the pft list mapping species to species codes and the file paths to the allometry table and field data (optional) +When running 'online' (connected to the PEcAn database), pass the database connection, + con, and the pfts subsection of the PEcAn settings. +When running 'stand alone' pass the pft list mapping species to species codes + and the file paths to the allometry table and field data (optional) } \examples{ diff --git a/modules/allometry/man/allom.BayesFit.Rd b/modules/allometry/man/allom.BayesFit.Rd index 66bc5933740..7baa69ba2af 100644 --- a/modules/allometry/man/allom.BayesFit.Rd +++ b/modules/allometry/man/allom.BayesFit.Rd @@ -12,21 +12,21 @@ allom.BayesFit(allom, nrep = 10000, form = "power", dmin = 0.1, dmax = 500) 'field' - contains a list, each entry for which is a data frame with 'x' and 'y'. Can be NULL 'parm' - a single data frame with the following components: - \itemize{ - \item{n} {sample size} - \item{a} {eqn coefficient} - \item{b} {eqn coefficient} - \item{c} {eqn coefficient} - \item{d} {eqn coefficient} - \item{e} {eqn coefficient} - \item{se} {standard error} - \item{eqn} {sample size} - \item{Xmin} {smallest tree sampled (cm)} - \item{Xmax} {largest tree sampled (cm)} - \item{Xcor} {units correction on X} - \item{Ycor} {units correction on Y} - \item{Xtype} {type of measurement on the X} - \item{spp} { - USFS species code} + \describe{ + \item{n}{sample size} + \item{a}{eqn coefficient} + \item{b}{eqn coefficient} + \item{c}{eqn coefficient} + \item{d}{eqn coefficient} + \item{e}{eqn coefficient} + \item{se}{standard error} + \item{eqn}{sample size} + \item{Xmin}{smallest tree sampled (cm)} + \item{Xmax}{largest tree sampled (cm)} + \item{Xcor}{units correction on X} + \item{Ycor}{units correction on Y} + \item{Xtype}{type of measurement on the X} + \item{spp}{ - USFS species code} }} \item{nrep}{- number of MCMC replicates} diff --git a/modules/allometry/man/allom.predict.Rd 
b/modules/allometry/man/allom.predict.Rd index e48f5d21a51..0c11064a542 100644 --- a/modules/allometry/man/allom.predict.Rd +++ b/modules/allometry/man/allom.predict.Rd @@ -17,7 +17,7 @@ allom.predict( } \arguments{ \item{object}{Allometry model object. Option includes -\itemize{ +\describe{ \item{'list of mcmc'}{ - mcmc outputs in a list by PFT then component} \item{'vector of file paths'}{ - path(s) to AllomAve RData files} \item{'directory where files are located}{ - } @@ -25,7 +25,8 @@ allom.predict( \item{dbh}{Diameter at Breast Height (cm)} -\item{pft}{Plant Functional Type. Needs to match the name used in AllomAve. Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh} +\item{pft}{Plant Functional Type. Needs to match the name used in AllomAve. +Can be NULL if only one PFT/species exists, otherwise needs to the same length as dbh} \item{component}{Which component to predict. Can be NULL if only one component was analysed in AllomAve.} @@ -34,6 +35,9 @@ allom.predict( \item{use}{c('Bg','mu','best')} \item{interval}{c('none','confidence','prediction') default is prediction} + +\item{single.tree}{logical: Is this a DBH time series from one indidual tree? +If TRUE, will use a fixed error for all draws.} } \value{ matrix of Monte Carlo predictions that has n rows and one column per DBH diff --git a/modules/allometry/man/load.allom.Rd b/modules/allometry/man/load.allom.Rd index 23389644719..4b9f0415485 100644 --- a/modules/allometry/man/load.allom.Rd +++ b/modules/allometry/man/load.allom.Rd @@ -8,7 +8,7 @@ load.allom(object) } \arguments{ \item{object}{Allometry model object. 
Option includes -\itemize{ +\describe{ \item{'vector of file paths'}{ - path(s) to AllomAve RData files} \item{'directory where files are located}{ - } }} diff --git a/modules/allometry/man/query.allom.data.Rd b/modules/allometry/man/query.allom.data.Rd index 7185ba0b56e..dfdf8591b1c 100644 --- a/modules/allometry/man/query.allom.data.Rd +++ b/modules/allometry/man/query.allom.data.Rd @@ -18,11 +18,11 @@ query.allom.data(pft_name, variable, con, nsim = 10000) \description{ Module to grab allometric information from the raw data table Will grab both original field data and tallied equations - -Tallied equation format based on Jenkins et al 2004 USFS -General Technical Report NE-319 } \details{ +Tallied equation format based on Jenkins et al 2004 USFS +General Technical Report NE-319 + database is assumed to conform to the PEcAn Schema } \author{ diff --git a/modules/allometry/man/read.allom.data.Rd b/modules/allometry/man/read.allom.data.Rd index e697399a122..cffd3a490ff 100644 --- a/modules/allometry/man/read.allom.data.Rd +++ b/modules/allometry/man/read.allom.data.Rd @@ -8,7 +8,7 @@ read.allom.data(pft.data, component, field, parm, nsim = 10000) } \arguments{ \item{pft.data}{PFT dataframe -\itemize{ +\describe{ \item{acronym}{USDA species acronyms, used with FIELD data (vector)} \item{spcd}{USFS species codes, use with TALLY data (vector)} }} diff --git a/modules/allometry/tests/Rcheck_reference.log b/modules/allometry/tests/Rcheck_reference.log index 8f31a1cde0b..2f3c924d380 100644 --- a/modules/allometry/tests/Rcheck_reference.log +++ b/modules/allometry/tests/Rcheck_reference.log @@ -20,7 +20,8 @@ Requires (indirectly) orphaned package: ‘udunits2’ * checking whether package ‘PEcAn.allometry’ can be installed ... OK * checking installed package size ... OK * checking package directory ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... 
OK * checking for left-over files ... OK * checking index information ... OK diff --git a/modules/assim.batch/tests/Rcheck_reference.log b/modules/assim.batch/tests/Rcheck_reference.log index ceaf7cc3ef1..41a05fee54e 100644 --- a/modules/assim.batch/tests/Rcheck_reference.log +++ b/modules/assim.batch/tests/Rcheck_reference.log @@ -25,7 +25,8 @@ use conditionally. * checking package directory ... OK * checking for future file timestamps ... OK * checking ‘build’ directory ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK diff --git a/modules/assim.sequential/R/Analysis_sda.R b/modules/assim.sequential/R/Analysis_sda.R index 5c209b4d1d7..e2af6bbfd0f 100644 --- a/modules/assim.sequential/R/Analysis_sda.R +++ b/modules/assim.sequential/R/Analysis_sda.R @@ -98,6 +98,7 @@ EnKF<-function(settings, Forecast, Observed, H, extraArg=NULL, ...){ ##' @param settings pecan standard settings list. ##' @param Forecast A list containing the forecasts variables including Q (process variance) and X (a dataframe of forecast state variables for different ensemble) ##' @param Observed A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables) +##' @param H not used ##' @param extraArg This argument is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the process covariance and t gives the time in terms of index of obs.list. See Details. ##' @param nitr Number of iterations to run each MCMC chain. ##' @param nburnin Number of initial, pre-thinning, MCMC iterations to discard. 
diff --git a/modules/assim.sequential/R/GEF_Helper.R b/modules/assim.sequential/R/GEF_Helper.R index 6f4573941f7..9012cc803c5 100644 --- a/modules/assim.sequential/R/GEF_Helper.R +++ b/modules/assim.sequential/R/GEF_Helper.R @@ -5,8 +5,9 @@ #' @param var.names (character) variable names. #' @param mu.f (numeric) forecast mean values. #' @param Pf (numeric) forecast covariance matrix. +#' @param t (numeric) timestep. If t=1, initial values are imputed for zero values in mu.f #' -#' @return +#' @return list with updated mu.f, pf, X, and indication of which y values are censored #' @export #' #' @examples diff --git a/modules/assim.sequential/R/Multi_Site_Constructors.R b/modules/assim.sequential/R/Multi_Site_Constructors.R index 29603586007..88a30bd56fb 100755 --- a/modules/assim.sequential/R/Multi_Site_Constructors.R +++ b/modules/assim.sequential/R/Multi_Site_Constructors.R @@ -6,6 +6,9 @@ ##' @param var.names vector names of state variable names. ##' @param X a matrix of state variables. In this matrix rows represent ensembles, while columns show the variables for different sites. ##' @param localization.FUN This is the function that performs the localization of the Pf matrix and it returns a localized matrix with the same dimensions. +##' @param t not used +##' @param blocked.dis passed to `localization.FUN` +##' @param ... passed to `localization.FUN` ##' @description The argument X needs to have an attribute pointing the state variables to their corresponding site. This attribute needs to be called `Site`. ##' At the moment, the cov between state variables at blocks defining the cov between two sites are assumed zero. ##' @return It returns the var-cov matrix of state variables at multiple sites. 
diff --git a/modules/assim.sequential/R/Remote_helpers.R b/modules/assim.sequential/R/Remote_helpers.R index 139f289f811..107d4f8a7f7 100644 --- a/modules/assim.sequential/R/Remote_helpers.R +++ b/modules/assim.sequential/R/Remote_helpers.R @@ -67,6 +67,7 @@ Obs.data.prepare.MultiSite <- function(obs.path, site.ids) { #' #' @param settingPath The Path to the setting that will run SDA #' @param ObsPath Path to the obs data which is expected to be an .Rdata. +#' @param run.bash.args Shell commands to be run on the remote host before launching the SDA. See examples #' #' @export #' @return This function returns a list of two pieces of information. One the remote path that SDA is running and the PID of the active run. diff --git a/modules/assim.sequential/R/hop_test.R b/modules/assim.sequential/R/hop_test.R index 5b69ad4ee7d..4c79b437e30 100644 --- a/modules/assim.sequential/R/hop_test.R +++ b/modules/assim.sequential/R/hop_test.R @@ -4,6 +4,7 @@ ##' ##' @param settings SDA PEcAn settings object ##' @param nyear number of years to run hop test over +##' @param ens.runid run id. If not provided, is looked up from [settings$outdir]/runs.txt ##' ##' @description Hop test. This script tests that the model successfully reads it's own restart and can restart without loss of information. 
##' diff --git a/modules/assim.sequential/R/load_data_paleon_sda.R b/modules/assim.sequential/R/load_data_paleon_sda.R index fb3e8d70ecb..a77596796ed 100644 --- a/modules/assim.sequential/R/load_data_paleon_sda.R +++ b/modules/assim.sequential/R/load_data_paleon_sda.R @@ -294,7 +294,7 @@ load_data_paleon_sda <- function(settings){ ### Error Message for no data product if(format_id[[i]] != '1000000040' & format_id[[i]] != '1000000058'){ - PEcAn.logger::logger.severe('ERROR: This data format has not been added to this function (ツ)_/¯ ') + PEcAn.logger::logger.severe('ERROR: This data format has not been added to this function :(') } } diff --git a/modules/assim.sequential/R/sda.enkf.R b/modules/assim.sequential/R/sda.enkf.R index b6d7ce56040..3dff5d07c79 100644 --- a/modules/assim.sequential/R/sda.enkf.R +++ b/modules/assim.sequential/R/sda.enkf.R @@ -1,5 +1,10 @@ -##' @title sda.enkf -##' @name sda.enkf +##' State Variable Data Assimilation: Ensemble Kalman Filter +##' +##' Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), +##' a new workflow folder is created and the previous forecast for the start_time is copied over. +##' During restart the initial run before the loop is skipped, with the info being populated from the previous run. +##' The function then dives right into the first Analysis, then continues on like normal. +##' ##' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} ##' ##' @param settings PEcAn settings object @@ -10,11 +15,6 @@ ##' @param adjustment flag for using ensemble adjustment filter or not ##' @param restart Used for iterative updating previous forecasts. This is a list that includes ens.inputs, the list of inputs by ensemble member, params, the parameters, and old_outdir, the output directory from the previous workflow. These three things are needed to ensure that if a new workflow is started that ensemble members keep there run-specific met and params.
See Details ##' -##’ @details -##’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal. -##' -##' @description State Variable Data Assimilation: Ensemble Kalman Filter -##' ##' ##' @return NONE ##' @export diff --git a/modules/assim.sequential/R/sda.enkf_MultiSite.R b/modules/assim.sequential/R/sda.enkf_MultiSite.R index b46d954ab95..81b77c6ba33 100644 --- a/modules/assim.sequential/R/sda.enkf_MultiSite.R +++ b/modules/assim.sequential/R/sda.enkf_MultiSite.R @@ -1,5 +1,14 @@ -#' @title sda.enkf.multisite -#' @name sda.enkf.multisite +#' State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter +#' +#' Check out SDA_control function for more details on the control arguments. +#' +#' Restart mode: Basic idea is that during a restart (primary case envisioned +#' as an iterative forecast), a new workflow folder is created and the previous +#' forecast for the start_time is copied over. During restart the initial run +#' before the loop is skipped, with the info being populated from the previous +#' run. The function then dives right into the first Analysis, then continues +#' on like normal. +#' #' @author Michael Dietze, Ann Raiho and Alexis Helgeson \email{dietze@@bu.edu} #' #' @param settings PEcAn settings object @@ -22,11 +31,7 @@ #' `forceRun` decide if we want to proceed the Bayesian MCMC sampling without observations; #' `run_parallel` decide if we want to run the SDA under parallel mode for the `future_map` function; #' `MCMC.args` include lists for controling the MCMC sampling process (iteration, nchains, burnin, and nthin.). 
-#' -#’ @details -#’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal. -#' -#' @description State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter. Check out SDA_control function for more details on the control arguments. +#' @param ... Additional arguments, currently ignored #' #' @return NONE #' @import nimble furrr diff --git a/modules/assim.sequential/R/sda.enkf_refactored.R b/modules/assim.sequential/R/sda.enkf_refactored.R index 836ef604afb..f87b5eecc41 100644 --- a/modules/assim.sequential/R/sda.enkf_refactored.R +++ b/modules/assim.sequential/R/sda.enkf_refactored.R @@ -1,25 +1,37 @@ -#' @title sda.enkf -#' @name sda.enkf +#' State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter +#' +#' Restart mode: Basic idea is that during a restart (primary case +#' envisioned as an iterative forecast), a new workflow folder is created and +#' the previous forecast for the start_time is copied over. During restart the +#' initial run before the loop is skipped, with the info being populated from +#' the previous run. The function then dives right into the first Analysis, +#' then continues on like normal. +#' #' @author Michael Dietze and Ann Raiho \email{dietze@@bu.edu} #' #' @param settings PEcAn settings object -#' @param obs.mean List of dataframe of observation means, named with observation datetime. -#' @param obs.cov List of covariance matrices of state variables , named with observation datetime. -#' @param Q Process covariance matrix given if there is no data to estimate it. -#' @param restart Used for iterative updating previous forecasts. 
When the restart is TRUE it read the object in SDA folder written from previous SDA. -#' @param control List of flags controlling the behaviour of the SDA. trace for reporting back the SDA outcomes, interactivePlot for plotting the outcomes after each step, -#' TimeseriesPlot for post analysis examination, BiasPlot for plotting the correlation between state variables, plot.title is the title of post analysis plots and debug mode allows for pausing the code and examining the variables inside the function. +#' @param obs.mean List of dataframe of observation means, named with +#' observation datetime. +#' @param obs.cov List of covariance matrices of state variables , named with +#' observation datetime. +#' @param Q Process covariance matrix given if there is no data to +#' estimate it. +#' @param restart Used for iterative updating previous forecasts. When the +#' restart is TRUE it read the object in SDA folder written from previous +#' SDA. +#' @param control List of flags controlling the behaviour of the SDA. trace +#' for reporting back the SDA outcomes, interactivePlot for plotting the +#' outcomes after each step, TimeseriesPlot for post analysis examination, +#' BiasPlot for plotting the correlation between state variables, plot.title +#' is the title of post analysis plots and debug mode allows for pausing the +#' code and examining the variables inside the function. +#' @param ... Additional arguments, currently ignored #' -#’ @details -#’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), a new workflow folder is created and the previous forecast for the start_time is copied over. During restart the initial run before the loop is skipped, with the info being populated from the previous run. The function then dives right into the first Analysis, then continues on like normal. 
-#' -#' @description State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter #' #' @return NONE #' @import nimble #' @export #' - sda.enkf <- function(settings, obs.mean, obs.cov, diff --git a/modules/assim.sequential/R/sda_plotting.R b/modules/assim.sequential/R/sda_plotting.R index 5bd30c3c64b..a9a33b614f0 100755 --- a/modules/assim.sequential/R/sda_plotting.R +++ b/modules/assim.sequential/R/sda_plotting.R @@ -334,6 +334,7 @@ postana.bias.plotting.sda<-function(settings, t, obs.times, obs.mean, obs.cov, o } ##' @rdname interactive.plotting.sda +#' @param aqq,bqq shape parameters estimated over time for the process covariance ##' @export postana.bias.plotting.sda.corr<-function(t, obs.times, X, aqq, bqq){ @@ -569,6 +570,8 @@ post.analysis.ggplot.violin <- function(settings, t, obs.times, obs.mean, obs.co } ##' @rdname interactive.plotting.sda +#' @param facetg logical: Create a subpanel for each variable? +#' @param readsFF optional forward forecast ##' @export post.analysis.multisite.ggplot <- function(settings, t, obs.times, obs.mean, obs.cov, FORECAST, ANALYSIS, plot.title=NULL, facetg=FALSE, readsFF=NULL, Add_Map=FALSE){ diff --git a/modules/assim.sequential/man/Contruct.Pf.Rd b/modules/assim.sequential/man/Contruct.Pf.Rd index ea6485de76a..acdaad508e6 100644 --- a/modules/assim.sequential/man/Contruct.Pf.Rd +++ b/modules/assim.sequential/man/Contruct.Pf.Rd @@ -22,6 +22,12 @@ Contruct.Pf( \item{X}{a matrix of state variables. In this matrix rows represent ensembles, while columns show the variables for different sites.} \item{localization.FUN}{This is the function that performs the localization of the Pf matrix and it returns a localized matrix with the same dimensions.} + +\item{t}{not used} + +\item{blocked.dis}{passed to `localization.FUN`} + +\item{...}{passed to `localization.FUN`} } \value{ It returns the var-cov matrix of state variables at multiple sites. 
diff --git a/modules/assim.sequential/man/GEF.Rd b/modules/assim.sequential/man/GEF.Rd index a0c9f0aad2d..accb0eb31e9 100644 --- a/modules/assim.sequential/man/GEF.Rd +++ b/modules/assim.sequential/man/GEF.Rd @@ -25,6 +25,8 @@ GEF.MultiSite(settings, Forecast, Observed, H, extraArg, ...) \item{Observed}{A list containing the observed variables including R (cov of observed state variables) and Y (vector of estimated mean of observed state variables)} +\item{H}{not used} + \item{extraArg}{This argument is a list containing aqq, bqq and t. The aqq and bqq are shape parameters estimated over time for the process covariance and t gives the time in terms of index of obs.list. See Details.} \item{nitr}{Number of iterations to run each MCMC chain.} diff --git a/modules/assim.sequential/man/SDA_remote_launcher.Rd b/modules/assim.sequential/man/SDA_remote_launcher.Rd index df2b5bb10fb..a90ff7758d0 100644 --- a/modules/assim.sequential/man/SDA_remote_launcher.Rd +++ b/modules/assim.sequential/man/SDA_remote_launcher.Rd @@ -10,6 +10,8 @@ SDA_remote_launcher(settingPath, ObsPath, run.bash.args) \item{settingPath}{The Path to the setting that will run SDA} \item{ObsPath}{Path to the obs data which is expected to be an .Rdata.} + +\item{run.bash.args}{Shell commands to be run on the remote host before launching the SDA. See examples} } \value{ This function returns a list of two pieces of information. One the remote path that SDA is running and the PID of the active run. diff --git a/modules/assim.sequential/man/hop_test.Rd b/modules/assim.sequential/man/hop_test.Rd index 7a22ae812a2..b3b804c9632 100644 --- a/modules/assim.sequential/man/hop_test.Rd +++ b/modules/assim.sequential/man/hop_test.Rd @@ -9,6 +9,8 @@ hop_test(settings, ens.runid = NULL, nyear) \arguments{ \item{settings}{SDA PEcAn settings object} +\item{ens.runid}{run id. 
If not provided, is looked up from [settings$outdir]/runs.txt} + \item{nyear}{number of years to run hop test over} } \value{ diff --git a/modules/assim.sequential/man/interactive.plotting.sda.Rd b/modules/assim.sequential/man/interactive.plotting.sda.Rd index 295cdd18561..35fabdaab05 100644 --- a/modules/assim.sequential/man/interactive.plotting.sda.Rd +++ b/modules/assim.sequential/man/interactive.plotting.sda.Rd @@ -128,8 +128,14 @@ SDA_timeseries_plot( \item{ANALYSIS}{Analysis object from the sda.output.Rdata.} +\item{aqq, bqq}{shape parameters estimated over time for the process covariance} + \item{plot.title}{character giving the title for post visualization ggplots} +\item{facetg}{logical: Create a subpanel for each variable?} + +\item{readsFF}{optional forward forecast} + \item{Add_Map}{Bool variable decide if we want to export the GIS map of Ecoregion.} \item{outdir}{physical path where the pdf will be stored.} diff --git a/modules/assim.sequential/man/sda.enkf.Rd b/modules/assim.sequential/man/sda.enkf.Rd index c3757f0497b..999685cca38 100644 --- a/modules/assim.sequential/man/sda.enkf.Rd +++ b/modules/assim.sequential/man/sda.enkf.Rd @@ -1,20 +1,9 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/sda.enkf.R, R/sda.enkf_refactored.R +% Please edit documentation in R/sda.enkf_refactored.R \name{sda.enkf} \alias{sda.enkf} -\alias{sda.enkf.original} -\title{sda.enkf} +\title{State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter} \usage{ -sda.enkf.original( - settings, - obs.mean, - obs.cov, - IC = NULL, - Q = NULL, - adjustment = TRUE, - restart = NULL -) - sda.enkf( settings, obs.mean, @@ -29,30 +18,38 @@ sda.enkf( \arguments{ \item{settings}{PEcAn settings object} -\item{obs.mean}{List of dataframe of observation means, named with observation datetime.} - -\item{obs.cov}{List of covariance matrices of state variables , named with observation datetime.} +\item{obs.mean}{List of dataframe of 
observation means, named with +observation datetime.} -\item{IC}{initial conditions} +\item{obs.cov}{List of covariance matrices of state variables , named with +observation datetime.} -\item{Q}{Process covariance matrix given if there is no data to estimate it.} +\item{Q}{Process covariance matrix given if there is no data to +estimate it.} -\item{adjustment}{flag for using ensemble adjustment filter or not} +\item{restart}{Used for iterative updating previous forecasts. When the +restart is TRUE it read the object in SDA folder written from previous +SDA.} -\item{restart}{Used for iterative updating previous forecasts. When the restart is TRUE it read the object in SDA folder written from previous SDA.} +\item{control}{List of flags controlling the behaviour of the SDA. trace +for reporting back the SDA outcomes, interactivePlot for plotting the +outcomes after each step, TimeseriesPlot for post analysis examination, +BiasPlot for plotting the correlation between state variables, plot.title +is the title of post analysis plots and debug mode allows for pausing the +code and examining the variables inside the function.} -\item{control}{List of flags controlling the behaviour of the SDA. 
trace for reporting back the SDA outcomes, interactivePlot for plotting the outcomes after each step, -TimeseriesPlot for post analysis examination, BiasPlot for plotting the correlation between state variables, plot.title is the title of post analysis plots and debug mode allows for pausing the code and examining the variables inside the function.} +\item{...}{Additional arguments, currently ignored} } \value{ -NONE - NONE } \description{ -State Variable Data Assimilation: Ensemble Kalman Filter - -State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter +Restart mode: Basic idea is that during a restart (primary case + envisioned as an iterative forecast), a new workflow folder is created and + the previous forecast for the start_time is copied over. During restart the + initial run before the loop is skipped, with the info being populated from + the previous run. The function then dives right into the first Analysis, + then continues on like normal. } \author{ Michael Dietze and Ann Raiho \email{dietze@bu.edu} diff --git a/modules/assim.sequential/man/sda.enkf.multisite.Rd b/modules/assim.sequential/man/sda.enkf.multisite.Rd index 9c3560b3e89..81b79f1c1a1 100644 --- a/modules/assim.sequential/man/sda.enkf.multisite.Rd +++ b/modules/assim.sequential/man/sda.enkf.multisite.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/sda.enkf_MultiSite.R \name{sda.enkf.multisite} \alias{sda.enkf.multisite} -\title{sda.enkf.multisite} +\title{State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter} \usage{ sda.enkf.multisite( settings, @@ -46,12 +46,22 @@ sda.enkf.multisite( `forceRun` decide if we want to proceed the Bayesian MCMC sampling without observations; `run_parallel` decide if we want to run the SDA under parallel mode for the `future_map` function; `MCMC.args` include lists for controling the MCMC sampling process (iteration, nchains, burnin, and nthin.).} + +\item{...}{Additional arguments, 
currently ignored} } \value{ NONE } \description{ -State Variable Data Assimilation: Ensemble Kalman Filter and Generalized ensemble filter. Check out SDA_control function for more details on the control arguments. +Check out SDA_control function for more details on the control arguments. +} +\details{ +Restart mode: Basic idea is that during a restart (primary case envisioned +as an iterative forecast), a new workflow folder is created and the previous +forecast for the start_time is copied over. During restart the initial run +before the loop is skipped, with the info being populated from the previous +run. The function then dives right into the first Analysis, then continues +on like normal. } \author{ Michael Dietze, Ann Raiho and Alexis Helgeson \email{dietze@bu.edu} diff --git a/modules/assim.sequential/man/tobit_model_censored.Rd b/modules/assim.sequential/man/tobit_model_censored.Rd index abab60dfebd..f1067af054e 100644 --- a/modules/assim.sequential/man/tobit_model_censored.Rd +++ b/modules/assim.sequential/man/tobit_model_censored.Rd @@ -16,6 +16,11 @@ tobit_model_censored(settings, X, var.names, mu.f, Pf, t) \item{mu.f}{(numeric) forecast mean values.} \item{Pf}{(numeric) forecast covariance matrix.} + +\item{t}{(numeric) timestep. If t=1, initial values are imputed for zero values in mu.f} +} +\value{ +list with updated mu.f, pf, X, and indication of which y values are censored } \description{ tobit_model_censored diff --git a/modules/assim.sequential/tests/Rcheck_reference.log b/modules/assim.sequential/tests/Rcheck_reference.log index 580fe6dc7b8..2efbc68b8c0 100644 --- a/modules/assim.sequential/tests/Rcheck_reference.log +++ b/modules/assim.sequential/tests/Rcheck_reference.log @@ -20,17 +20,13 @@ * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. 
* checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK * checking package subdirectories ... OK -* checking R files for non-ASCII characters ... WARNING -Found the following file with non-ASCII characters: - load_data_paleon_sda.R -Portable packages must use only ASCII characters in their R code, -except perhaps in comments. -Use \uxxxx escapes for other characters. +* checking R files for non-ASCII characters ... OK * checking R files for syntax errors ... OK * checking whether the package can be loaded ... OK * checking whether the package can be loaded with stated dependencies ... OK diff --git a/modules/benchmark/tests/Rcheck_reference.log b/modules/benchmark/tests/Rcheck_reference.log index 825ed0601a0..26e595f3099 100644 --- a/modules/benchmark/tests/Rcheck_reference.log +++ b/modules/benchmark/tests/Rcheck_reference.log @@ -20,7 +20,8 @@ * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... 
OK diff --git a/modules/data.atmosphere/DESCRIPTION b/modules/data.atmosphere/DESCRIPTION index 893b7b1da18..32ecfce8a76 100644 --- a/modules/data.atmosphere/DESCRIPTION +++ b/modules/data.atmosphere/DESCRIPTION @@ -63,6 +63,7 @@ Suggests: foreach, furrr, future, + knitr, mockery, parallel, PEcAn.settings, @@ -79,5 +80,6 @@ License: BSD_3_clause + file LICENSE Copyright: Authors LazyLoad: yes LazyData: FALSE +VignetteBuilder: knitr Encoding: UTF-8 RoxygenNote: 7.3.2 diff --git a/modules/data.atmosphere/R/closest_xy.R b/modules/data.atmosphere/R/closest_xy.R index a9a18442beb..96c02512736 100644 --- a/modules/data.atmosphere/R/closest_xy.R +++ b/modules/data.atmosphere/R/closest_xy.R @@ -1,8 +1,11 @@ ##' Given latitude and longitude coordinates, find NARR x and y indices ##' +##' @param slat,slon site location, in decimal degrees +##' @param infolder path to folder containing infile +##' @param infile pattern to match for filename inside infolder. +##' Only the first file matching this pattern AND ending with '.nc' +##' will be used ##' -##' @name closest_xy -##' @title closest_xy ##' @export ##' @author Betsy Cowdery, Ankur Desai closest_xy <- function(slat, slon, infolder, infile) { diff --git a/modules/data.atmosphere/R/download.Ameriflux.R b/modules/data.atmosphere/R/download.Ameriflux.R index 38ab416a08c..e56e63158d1 100644 --- a/modules/data.atmosphere/R/download.Ameriflux.R +++ b/modules/data.atmosphere/R/download.Ameriflux.R @@ -18,6 +18,7 @@ download.Ameriflux.site <- function(site_id) { ##' @param end_date the end date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param ... 
further arguments, currently ignored ##' ##' @author Josh Mantooth, Rob Kooper, Ankur Desai download.Ameriflux <- function(sitename, outfolder, start_date, end_date, diff --git a/modules/data.atmosphere/R/download.AmerifluxLBL.R b/modules/data.atmosphere/R/download.AmerifluxLBL.R index 4fe706a1f33..677266ffa8a 100644 --- a/modules/data.atmosphere/R/download.AmerifluxLBL.R +++ b/modules/data.atmosphere/R/download.AmerifluxLBL.R @@ -20,6 +20,7 @@ ##' @param useremail Used email, should include 'address sign' for code to be functional ##' @param data_product AmeriFlux data product ##' @param data_policy Two possible licenses (based on the site): 'CCBY4.0' or 'LEGACY' +##' @param ... further arguments, currently ignored ##' ##' @examples ##' \dontrun{ diff --git a/modules/data.atmosphere/R/download.ERA5.R b/modules/data.atmosphere/R/download.ERA5.R index 533aeac96fc..87c6e7a9d3f 100644 --- a/modules/data.atmosphere/R/download.ERA5.R +++ b/modules/data.atmosphere/R/download.ERA5.R @@ -30,6 +30,8 @@ #' @return Character vector of file names containing raw, downloaded #' data (invisibly) #' @author Alexey Shiklomanov +#' @md + # ^ tells Roxygen to interpret this fn's doc block as Markdown #' @export #' @examples #' \dontrun{ diff --git a/modules/data.atmosphere/R/download.Fluxnet2015.R b/modules/data.atmosphere/R/download.Fluxnet2015.R index fb98328ee30..80bde7b6714 100644 --- a/modules/data.atmosphere/R/download.Fluxnet2015.R +++ b/modules/data.atmosphere/R/download.Fluxnet2015.R @@ -10,6 +10,8 @@ ##' @param end_date the end date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose +##' @param username login name for Ameriflux +##' @param ... 
further arguments, currently ignored ##' ##' @author Ankur Desai, based on download.Ameriflux.R by Josh Mantooth, Rob Kooper download.Fluxnet2015 <- function(sitename, outfolder, start_date, end_date, diff --git a/modules/data.atmosphere/R/download.FluxnetLaThuile.R b/modules/data.atmosphere/R/download.FluxnetLaThuile.R index 70134b3604a..44d065a46cb 100644 --- a/modules/data.atmosphere/R/download.FluxnetLaThuile.R +++ b/modules/data.atmosphere/R/download.FluxnetLaThuile.R @@ -18,6 +18,7 @@ download.FluxnetLaThuile.site <- function(site_id) { ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose ##' @param username should be the registered Fluxnet username, else defaults to pecan +##' @param ... further arguments, currently ignored ##' ##' @author Ankur Desai download.FluxnetLaThuile <- function(sitename, outfolder, start_date, end_date, diff --git a/modules/data.atmosphere/R/download.GFDL.R b/modules/data.atmosphere/R/download.GFDL.R index 2f0c975e85d..dd1640ae7ae 100644 --- a/modules/data.atmosphere/R/download.GFDL.R +++ b/modules/data.atmosphere/R/download.GFDL.R @@ -13,6 +13,8 @@ #' @param model Which GFDL model to run (options are CM3, ESM2M, ESM2G) #' @param scenario Which scenario to run (options are rcp26, rcp45, rcp60, rcp85) #' @param ensemble_member Which ensemble_member to initialize the run (options are r1i1p1, r3i1p1, r5i1p1) +#' @param ... 
further arguments, currently ignored +#' #' @author James Simkins, Alexey Shiklomanov, Ankur Desai download.GFDL <- function(outfolder, start_date, end_date, lat.in, lon.in, overwrite = FALSE, verbose = FALSE, diff --git a/modules/data.atmosphere/R/download.NARR_site.R b/modules/data.atmosphere/R/download.NARR_site.R index 9de280c15d7..bf2fdb1b51d 100644 --- a/modules/data.atmosphere/R/download.NARR_site.R +++ b/modules/data.atmosphere/R/download.NARR_site.R @@ -7,9 +7,12 @@ #' @param lon.in Site longitude coordinate #' @param overwrite Overwrite existing files? Default=FALSE #' @param verbose Turn on verbose output? Default=FALSE +#' @param progress Whether or not to show a progress bar. +#' Requires the `progress` package to be installed. #' @param parallel Download in parallel? Default = TRUE #' @param ncores Number of cores for parallel download. Default is #' `parallel::detectCores()` +#' @param ... further arguments, currently ignored #' #' @examples #' @@ -345,7 +348,7 @@ generate_narr_url <- function(dates, flx) { dplyr::select("startdate", "url") } -#' Assign daygroup tag for a given date +# Assign daygroup tag for a given date daygroup <- function(date, flx) { mday <- lubridate::mday(date) mmax <- lubridate::days_in_month(date) diff --git a/modules/data.atmosphere/R/download.NEONmet.R b/modules/data.atmosphere/R/download.NEONmet.R index f1262701790..be132a1a5ee 100644 --- a/modules/data.atmosphere/R/download.NEONmet.R +++ b/modules/data.atmosphere/R/download.NEONmet.R @@ -12,6 +12,8 @@ ##' @param end_date the end date of the data to be downloaded. Format is YYYY-MM-DD (will only use the year and month part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose makes the function output more text +##' @param ... 
further arguments, currently ignored +##' ##' @examples ##' \dontrun{ ##' result <- download.NEONmet('HARV','~/','2017-01-01','2017-01-31',overwrite=TRUE) diff --git a/modules/data.atmosphere/R/download.PalEON.R b/modules/data.atmosphere/R/download.PalEON.R index 50a55ffe9c5..3618d2193b7 100644 --- a/modules/data.atmosphere/R/download.PalEON.R +++ b/modules/data.atmosphere/R/download.PalEON.R @@ -20,7 +20,9 @@ download.PalEON <- function(sitename, outfolder, start_date, end_date, overwrite else if (sitename == "Howland Forest- main tower (US-Ho1) (PalEON PHO)") { site <- "PHO" } # 0-759 - else if (sitename == "Billy’s Lake (PalEON PBL)") { + else if (sitename == "Billy\U2019s Lake (PalEON PBL)") { + #\U2019 = curly right single-quote, escaped to keep R from complaining about non-ASCII in code files + # (yes, the curly quote is present in the DB sitename) site <- "PBL" } # 1-672 done else if (sitename == "Deming Lake (PalEON PDL)") { diff --git a/modules/data.atmosphere/R/extract.nc.R b/modules/data.atmosphere/R/extract.nc.R index 3f4651175c5..c2f1f7c5001 100644 --- a/modules/data.atmosphere/R/extract.nc.R +++ b/modules/data.atmosphere/R/extract.nc.R @@ -11,6 +11,8 @@ ##' @param slon the longitude of the site ##' @param overwrite should existing files be overwritten ##' @param verbose should ouput of function be extra verbose +##' @param ... further arguments, currently ignored +##' ##' @export ##' @author Betsy Cowdery extract.nc <- function(in.path, in.prefix, outfolder, start_date, end_date, slat, slon, diff --git a/modules/data.atmosphere/R/extract_ERA5.R b/modules/data.atmosphere/R/extract_ERA5.R index 4bc5a9f05a0..fe5e5b7b5ab 100644 --- a/modules/data.atmosphere/R/extract_ERA5.R +++ b/modules/data.atmosphere/R/extract_ERA5.R @@ -6,13 +6,16 @@ #' @param start_date start date #' @param end_date end date #' @param outfolder Path to directory where nc files need to be saved. -#' @param in.prefix initial portion of the filename that does not vary by date. 
Does not include directory; specify that as part of in.path. +#' @param in.prefix initial portion of the filename that does not vary by date. +#' Does not include directory; specify that as part of in.path. #' @param newsite site name. -#' @param vars variables to be extracted. If NULL all the variables will be returned. +#' @param vars variables to be extracted. If NULL all the variables will be +#' returned. #' @param overwrite Logical if files needs to be overwritten. #' @param verbose Decide if we want to stop printing info. #' @param ... other inputs. -#' @details For the list of variables check out the documentation at \link{https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation#ERA5datadocumentation-Spatialgrid} +#' @details For the list of variables check out the documentation at \url{ +#' https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation#ERA5datadocumentation-Spatialgrid} #' #' @return a list of xts objects with all the variables for the requested years #' @export diff --git a/modules/data.atmosphere/R/lightME.R b/modules/data.atmosphere/R/lightME.R index e7e6667bdd4..842263a718e 100644 --- a/modules/data.atmosphere/R/lightME.R +++ b/modules/data.atmosphere/R/lightME.R @@ -13,7 +13,7 @@ ##' @param alpha atmospheric transmittance, default 0.85. ##' @export ##' @return a \code{\link{list}} structure with components: -##' \itemize{ +##' \describe{ ##' \item{'I.dir'}{Direct radiation (\eqn{\mu} mol \eqn{m^{-2}s^{-1}}} ##' \item{'I.diff'}{Indirect (diffuse) radiation (\eqn{\mu} mol\eqn{m^{-2}s^{-1}}} ##' \item{'cos.th'}{cosine of \eqn{\theta}, solar zenith angle.} diff --git a/modules/data.atmosphere/R/merge.met.variable.R b/modules/data.atmosphere/R/merge.met.variable.R index 13e8c404fb2..dbd369f3769 100644 --- a/modules/data.atmosphere/R/merge.met.variable.R +++ b/modules/data.atmosphere/R/merge.met.variable.R @@ -1,23 +1,22 @@ #' Merge a new met variable from an external file (e.g. 
CO2) into existing met files #' +#' Currently modifies the files IN PLACE rather than creating a new copy of the files and a new DB record. +#' Currently unit and name checking only implemented for CO2. +#' Currently does not yet support merge data that has lat/lon +#' New variable only has time dimension and thus MIGHT break downstream code.... +#' #' @param in.path path to original data #' @param in.prefix prefix of original data -#' @param start_date -#' @param end_date +#' @param start_date,end_date date (or character in a standard date format). Only year component is used. #' @param merge.file path of file to be merged in #' @param overwrite logical: replace output file if it already exists? #' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions #' print debugging information as they run? -#' @param ... +#' @param ... other arguments, currently ignored #' #' @return Currently nothing. TODO: Return a data frame summarizing the merged files. #' @export #' -#' @details Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. -#' Currently unit and name checking only implemented for CO2. -#' Currently does not yet support merge data that has lat/lon -#' New variable only has time dimension and thus MIGHT break downstream code.... -#' #' @examples #' \dontrun{ #' in.path <- "~/paleon/PalEONregional_CF_site_1-24047/" diff --git a/modules/data.atmosphere/R/met.process.R b/modules/data.atmosphere/R/met.process.R index 8e1408b3887..3286eb5176a 100644 --- a/modules/data.atmosphere/R/met.process.R +++ b/modules/data.atmosphere/R/met.process.R @@ -19,6 +19,8 @@ ##' *except* raw met downloads. I.e., it corresponds to: ##' ##' list(download = FALSE, met2cf = TRUE, standardize = TRUE, met2model = TRUE) +##' @param browndog login info for the Browndog conversion service, if used.
+##' List of `url`, `username`, `password` ##' @importFrom rlang .data .env ##' @export ##' @author Elizabeth Cowdery, Michael Dietze, Ankur Desai, James Simkins, Ryan Kelly diff --git a/modules/data.atmosphere/R/met2CF.ALMA.R b/modules/data.atmosphere/R/met2CF.ALMA.R index 5b47429ece7..d674d071b9c 100644 --- a/modules/data.atmosphere/R/met2CF.ALMA.R +++ b/modules/data.atmosphere/R/met2CF.ALMA.R @@ -20,6 +20,8 @@ insertPmet <- function(vals, nc2, var2, dim2, units2 = NA, conv = NULL, ##' @param start_date the start date of the data to be downloaded (will only use the year part of the date) ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten +##' @param verbose logical: enable verbose mode for netcdf writer functions? +##' @param ... further arguments, currently ignored ##' ##' @author Mike Dietze met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, @@ -179,7 +181,10 @@ met2CF.PalEONregional <- function(in.path, in.prefix, outfolder, start_date, end ##' @param outfolder location on disk where outputs will be stored ##' @param start_date the start date of the data to be downloaded (will only use the year part of the date) ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) +##' @param lat,lon site location in decimal degrees. Caution: both must have length one. ##' @param overwrite should existing files be overwritten +##' @param verbose logical: enable verbose mode for netcdf writer functions? +##' @param ... 
further arguments, currently ignored ##' ##' @author Mike Dietze met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, lat, lon, overwrite = FALSE, @@ -373,6 +378,7 @@ met2CF.PalEON <- function(in.path, in.prefix, outfolder, start_date, end_date, l ##' @param start_date the start date of the data to be downloaded (will only use the year part of the date) ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten +##' @param verbose logical: enable verbose mode for netcdf writer functions? ##' ##' @author Mike Dietze met2CF.ALMA <- function(in.path, in.prefix, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE) { diff --git a/modules/data.atmosphere/R/met2CF.Ameriflux.R b/modules/data.atmosphere/R/met2CF.Ameriflux.R index 9686b6e7732..88e873d4a3d 100644 --- a/modules/data.atmosphere/R/met2CF.Ameriflux.R +++ b/modules/data.atmosphere/R/met2CF.Ameriflux.R @@ -69,6 +69,7 @@ getLatLon <- function(nc1) { ##' @param end_date the end date of the data to be downloaded (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should ouput of function be extra verbose +##' @param ... further arguments, currently ignored ##' ##' @author Josh Mantooth, Mike Dietze, Elizabeth Cowdery, Ankur Desai met2CF.Ameriflux <- function(in.path, in.prefix, outfolder, start_date, end_date, diff --git a/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R b/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R index abbb81a42c6..69f5c882304 100644 --- a/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R +++ b/modules/data.atmosphere/R/met2CF.AmerifluxLBL.R @@ -29,6 +29,7 @@ ##' @param overwrite should existing files be overwritten ##' @param verbose should ouput of function be extra verbose +##' @param ... 
further arguments, currently ignored ##' ##' @author Ankur Desai met2CF.AmerifluxLBL <- function(in.path, in.prefix, outfolder, start_date, end_date, format, diff --git a/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R b/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R index 1e573a65706..820028978ba 100644 --- a/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R +++ b/modules/data.atmosphere/R/metgapfill.NOAA_GEFS.R @@ -1,19 +1,21 @@ -##'@title Gapfill NOAA_GEFS weather data -##'@section Purpose: -##'This function uses simple methods to gapfill NOAA GEFS met data -##'Temperature and Precipitation are gapfilled with spline; other data sources are gapfilled with -##'using linear models fitted to other fitted data. -##' -##'@param in.prefix the met file name -##'@param in.path The location of the file -##'@param outfolder The place to write the output file to -##'@param start_date The start date of the contents of the file -##'@param end_date The end date of the contents of the file -##'@param overwrite Whether or not to overwrite the output file if it exists or not -##'@param verbose Passed to nc writing functions for additional output -##'@export -##' -##'@author Luke Dramko +#' Gapfill NOAA_GEFS weather data +#' +#' This function uses simple methods to gapfill NOAA GEFS met data. +#' Temperature and Precipitation are gapfilled with splines; +#' other data sources are gapfilled using linear models fitted to +#' other fitted data. +#' +#' @param in.prefix the met file name +#' @param in.path The location of the file +#' @param outfolder The place to write the output file to +#' @param start_date The start date of the contents of the file +#' @param end_date The end date of the contents of the file +#' @param overwrite Whether or not to overwrite the output file if it exists or not +#' @param verbose Passed to nc writing functions for additional output +#' @param ... 
further arguments, currently ignored +#' +#' @author Luke Dramko +#' @export metgapfill.NOAA_GEFS <- function(in.prefix, in.path, outfolder, start_date, end_date, overwrite = FALSE, verbose = FALSE, ...) { diff --git a/modules/data.atmosphere/R/metgapfill.R b/modules/data.atmosphere/R/metgapfill.R index db53eebe875..9a4bc711df7 100644 --- a/modules/data.atmosphere/R/metgapfill.R +++ b/modules/data.atmosphere/R/metgapfill.R @@ -13,6 +13,8 @@ ##' @param overwrite should existing files be overwritten ##' @param verbose should the function be very verbose ##' @param lst is timezone offset from UTC, if timezone is available in time:units attribute in file, it will use that, default is to assume UTC +##' @param ... further arguments, currently ignored +##' ##' @author Ankur Desai metgapfill <- function(in.path, in.prefix, outfolder, start_date, end_date, lst = 0, overwrite = FALSE, verbose = FALSE, ...) { diff --git a/modules/data.atmosphere/R/nc_merge.R b/modules/data.atmosphere/R/nc_merge.R index e532e66d69d..963ea9e998f 100644 --- a/modules/data.atmosphere/R/nc_merge.R +++ b/modules/data.atmosphere/R/nc_merge.R @@ -23,6 +23,8 @@ ##' @param overwrite logical: replace output file if it already exists? ##' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} ##' functions print debugging information as they run? +##' @param ... 
further arguments, currently ignored +##' ##' @export # ----------------------------------- #---------------------------------------------------------------------- diff --git a/modules/data.atmosphere/R/permute.nc.R b/modules/data.atmosphere/R/permute.nc.R index 2ed515968f1..24a3a96c117 100644 --- a/modules/data.atmosphere/R/permute.nc.R +++ b/modules/data.atmosphere/R/permute.nc.R @@ -10,6 +10,7 @@ ##' @param end_date the end date of the data to be permuted (will only use the year part of the date) ##' @param overwrite should existing files be overwritten ##' @param verbose should ouput of function be extra verbose +##' @param ... further arguments, currently ignored ##' ##' @author Elizabeth Cowdery, Rob Kooper permute.nc <- function(in.path, in.prefix, outfolder, start_date, end_date, diff --git a/modules/data.atmosphere/R/split_wind.R b/modules/data.atmosphere/R/split_wind.R index 427d8a57aa4..a43de879ec8 100644 --- a/modules/data.atmosphere/R/split_wind.R +++ b/modules/data.atmosphere/R/split_wind.R @@ -1,9 +1,10 @@ #' Split wind_speed into eastward_wind and northward_wind #' +#' Currently modifies the files IN PLACE rather than creating a new copy of the files and a new DB record. +#' #' @param in.path path to original data #' @param in.prefix prefix of original data -#' @param start_date -#' @param end_date +#' @param start_date,end_date date (or character in a standard date format). Only year component is used. #' @param overwrite logical: replace output file if it already exists? #' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run? #' @param ... other arguments, currently ignored @@ -11,7 +12,6 @@ #' @return nothing. TODO: Return data frame summarizing results #' @export #' -#' @details Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record.
#' #' @examples #' \dontrun{ diff --git a/modules/data.atmosphere/R/tdm_generate_subdaily_models.R b/modules/data.atmosphere/R/tdm_generate_subdaily_models.R index 35031f492e3..87e45b405d2 100644 --- a/modules/data.atmosphere/R/tdm_generate_subdaily_models.R +++ b/modules/data.atmosphere/R/tdm_generate_subdaily_models.R @@ -1,48 +1,38 @@ -##' Generate Subdaily Models -##' Create statistical models to predict subdaily meteorology -# ----------------------------------- -# Description -# ----------------------------------- -##' -##' @title gen.subdaily.models -##' @family tdm - Temporally Downscale Meteorology -##' @author Christy Rollinson, James Simkins -##' @description This is the 2nd function in the tdm workflow that takes the dat.train_file that is created from the -##' nc2dat.train function and generates "lag.days" and "next.days". These variables pass along information -##' of the previous time step and provides a preview of the next time step. After these variables are created, -##' the models are generated by calling the tdm_temporal_downscale_functions.R scripts and these models -##' and betas are saved separately. Please note that these models and betas require a significant -##' amount of space. The storage required varies by the size of the training dataset, but prepare for -##' >100 GB. These will be called later in tdm_predict_subdaily_met to perform the linear regression -##' analysis. -# ----------------------------------- -# Parameters -# ----------------------------------- -##' @param outfolder - directory where models will be stored *** storage required varies by size of training dataset, but prepare for >10 GB -##' @param path.train - path to CF/PEcAn style training data where each year is in a separate file. -##' @param yrs.train - which years of the training data should be used for to generate the model for -##' the subdaily cycle. 
If NULL, will default to all years -##' @param direction.filter - Whether the model will be filtered backward or forward in time. options = c("backward", "forward") -##' (PalEON will go backward, anybody interested in the future will go forward) -##' @param in.prefix -##' @param n.beta - number of betas to save from linear regression model -##' @param resids - logical stating whether to pass on residual data or not (this increases both memory & storage requirements) -##' @param parallel - logical stating whether to run temporal_downscale_functions.R in parallel -##' @param n.cores - deals with parallelization -##' @param day.window - integer specifying number of days around the day being modeled you want to use data from for that -##' specific hours coefficients. Must be integer because we want statistics from the same time of day -##' for each day surrounding the model day -##' @param seed - seed for randomization to allow for reproducible results -##' @param overwrite logical: replace output file if it already exists? -##' @param verbose logical, currently ignored -##' @param print.progress - print progress bar? (gets passed through) -##' @export -# ----------------------------------- -#---------------------------------------------------------------------- -# Begin Function -#---------------------------------------------------------------------- - - +#' Generate Subdaily Models +#' +#' Create statistical models to predict subdaily meteorology +#' This is the 2nd function in the tdm workflow that takes the dat.train_file that is created from the +#' nc2dat.train function and generates "lag.days" and "next.days". These variables pass along information +#' of the previous time step and provides a preview of the next time step. After these variables are created, +#' the models are generated by calling the tdm_temporal_downscale_functions.R scripts and these models +#' and betas are saved separately. 
Please note that these models and betas require a significant +#' amount of space. The storage required varies by the size of the training dataset, but prepare for +#' >100 GB. These will be called later in tdm_predict_subdaily_met to perform the linear regression +#' analysis. +#' +#' @family tdm - Temporally Downscale Meteorology +#' @author Christy Rollinson, James Simkins +#' +#' @param outfolder - directory where models will be stored *** storage required varies by size of training dataset, but prepare for >10 GB +#' @param path.train - path to CF/PEcAn style training data where each year is in a separate file. +#' @param yrs.train - which years of the training data should be used for to generate the model for +#' the subdaily cycle. If NULL, will default to all years +#' @param direction.filter - Whether the model will be filtered backward or forward in time. options = c("backward", "forward") +#' (PalEON will go backward, anybody interested in the future will go forward) +#' @param in.prefix not used +#' @param n.beta - number of betas to save from linear regression model +#' @param resids - logical stating whether to pass on residual data or not (this increases both memory & storage requirements) +#' @param parallel - logical stating whether to run temporal_downscale_functions.R in parallel +#' @param n.cores - deals with parallelization +#' @param day.window - integer specifying number of days around the day being modeled you want to use data from for that +#' specific hours coefficients. Must be integer because we want statistics from the same time of day +#' for each day surrounding the model day +#' @param seed - seed for randomization to allow for reproducible results +#' @param overwrite logical: replace output file if it already exists? +#' @param verbose logical, currently ignored +#' @param print.progress - print progress bar? 
(gets passed through) +#' @export +#' gen.subdaily.models <- function(outfolder, path.train, yrs.train, direction.filter="forward", in.prefix, n.beta, day.window, seed=Sys.time(), resids = FALSE, parallel = FALSE, n.cores = NULL, overwrite = TRUE, verbose = FALSE, print.progress=FALSE) { diff --git a/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R b/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R index d594666611c..5b7a04e07e5 100644 --- a/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R +++ b/modules/data.atmosphere/R/tdm_lm_ensemble_sims.R @@ -19,6 +19,7 @@ ##' @param path.model - path to where the training model & betas is stored ##' @param direction.filter - Whether the model will be filtered backward or forward in time. options = c("backward", "forward") ##' (PalEON will go backward, anybody interested in the future will go forward) +##' @param lags.list - optional list form of lags.init, with one entry for each unique `ens.day` in dat.mod ##' @param lags.init - a data frame of initialization parameters to match the data in dat.mod ##' @param dat.train - the training data used to fit the model; needed for night/day in ##' surface_downwelling_shortwave_flux_in_air diff --git a/modules/data.atmosphere/R/tdm_model_train.R b/modules/data.atmosphere/R/tdm_model_train.R index 0cff73e411d..03a8060cd82 100644 --- a/modules/data.atmosphere/R/tdm_model_train.R +++ b/modules/data.atmosphere/R/tdm_model_train.R @@ -14,10 +14,13 @@ # Parameters # ----------------------------------- ##' @param dat.subset data.frame containing lags, next, and downscale period data +##' @param v variable name, as character ##' @param n.beta number of betas to pull from ##' @param resids TRUE or FALSE, whether to use residuals or not ##' @param threshold NULL except for surface_downwelling_shortwave_radiation, helps with our ##' distinction between day and night (no shortwave without sunlight) +##' @param ... 
further arguments, currently ignored +##' ##' @export # ----------------------------------- #---------------------------------------------------------------------- diff --git a/modules/data.atmosphere/R/tdm_predict_subdaily_met.R b/modules/data.atmosphere/R/tdm_predict_subdaily_met.R index 825bd203672..3e4526d8b50 100644 --- a/modules/data.atmosphere/R/tdm_predict_subdaily_met.R +++ b/modules/data.atmosphere/R/tdm_predict_subdaily_met.R @@ -37,6 +37,8 @@ ##' @param verbose logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run? ##' @param print.progress - print the progress bar? ##' @param seed - manually set seed for results to be reproducible +##' @param ... further arguments, currently ignored +##' ##' @export ##' @examples ##' \dontrun{ diff --git a/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R b/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R index 911b1ba4014..76331439a08 100644 --- a/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R +++ b/modules/data.atmosphere/R/tdm_temporal_downscale_functions.R @@ -28,6 +28,8 @@ ##' @param seed - allows this to be reproducible ##' @param outfolder = where the output should be stored ##' @param print.progress - print progress of model generation? +##' @param ... 
further arguments, currently ignored +##' ##' @export # ----------------------------------- #---------------------------------------------------------------------- diff --git a/modules/data.atmosphere/man/closest_xy.Rd b/modules/data.atmosphere/man/closest_xy.Rd index 1b8e3c17db3..b767af5b5d0 100644 --- a/modules/data.atmosphere/man/closest_xy.Rd +++ b/modules/data.atmosphere/man/closest_xy.Rd @@ -2,10 +2,19 @@ % Please edit documentation in R/closest_xy.R \name{closest_xy} \alias{closest_xy} -\title{closest_xy} +\title{Given latitude and longitude coordinates, find NARR x and y indices} \usage{ closest_xy(slat, slon, infolder, infile) } +\arguments{ +\item{slat, slon}{site location, in decimal degrees} + +\item{infolder}{path to folder containing infile} + +\item{infile}{pattern to match for filename inside infile. +Only the first file matching this pattern AND ending with '.nc' +will be used} +} \description{ Given latitude and longitude coordinates, find NARR x and y indices } diff --git a/modules/data.atmosphere/man/daygroup.Rd b/modules/data.atmosphere/man/daygroup.Rd deleted file mode 100644 index 10dab9e98b6..00000000000 --- a/modules/data.atmosphere/man/daygroup.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/download.NARR_site.R -\name{daygroup} -\alias{daygroup} -\title{Assign daygroup tag for a given date} -\usage{ -daygroup(date, flx) -} -\description{ -Assign daygroup tag for a given date -} diff --git a/modules/data.atmosphere/man/download.Ameriflux.Rd b/modules/data.atmosphere/man/download.Ameriflux.Rd index 4bb914bcaf1..15091897fc2 100644 --- a/modules/data.atmosphere/man/download.Ameriflux.Rd +++ b/modules/data.atmosphere/man/download.Ameriflux.Rd @@ -27,6 +27,8 @@ The 'SITE_ID' field in \href{http://ameriflux.lbl.gov/sites/site-list-and-pages/ \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{...}{further arguments, 
currently ignored} } \description{ Download Ameriflux L2 netCDF files diff --git a/modules/data.atmosphere/man/download.AmerifluxLBL.Rd b/modules/data.atmosphere/man/download.AmerifluxLBL.Rd index 8b30000484f..9b8fa2290ad 100644 --- a/modules/data.atmosphere/man/download.AmerifluxLBL.Rd +++ b/modules/data.atmosphere/man/download.AmerifluxLBL.Rd @@ -42,6 +42,8 @@ The 'SITE_ID' field in \href{http://ameriflux.lbl.gov/sites/site-list-and-pages/ \item{data_product}{AmeriFlux data product} \item{data_policy}{Two possible licenses (based on the site): 'CCBY4.0' or 'LEGACY'} + +\item{...}{further arguments, currently ignored} } \description{ download.AmerifluxLBL. Function uses amf_download_base function from amerifluxr package diff --git a/modules/data.atmosphere/man/download.ERA5.old.Rd b/modules/data.atmosphere/man/download.ERA5.old.Rd index d065a061cec..64199c95ae1 100644 --- a/modules/data.atmosphere/man/download.ERA5.old.Rd +++ b/modules/data.atmosphere/man/download.ERA5.old.Rd @@ -20,20 +20,20 @@ download.ERA5.old( \item{outfolder}{Directory where results should be written} \item{start_date, end_date}{Range of years to retrieve. Format is -`YYYY-MM-DD`.} +\code{YYYY-MM-DD}.} \item{lat.in, lon.in}{Site coordinates, decimal degrees (numeric)} -\item{product_types}{Character vector of product types, or `"all"`. -Must be one or more of: `"reanalysis"`, `"ensemble members"`, -`"ensemble mean"`, `"ensemble spread"`} +\item{product_types}{Character vector of product types, or \code{"all"}. +Must be one or more of: \code{"reanalysis"}, \code{"ensemble members"}, +\code{"ensemble mean"}, \code{"ensemble spread"}} -\item{overwrite}{Logical. If `FALSE` (default), skip any files with +\item{overwrite}{Logical. If \code{FALSE} (default), skip any files with the same target name (i.e. same variable) that already exist in -`outfolder`. If `TRUE`, silently overwrite existing files.} +\code{outfolder}. 
If \code{TRUE}, silently overwrite existing files.} -\item{reticulate_python}{Path to Python binary for `reticulate` -(passed to [reticulate::use_python()]). If `NULL` (default), use +\item{reticulate_python}{Path to Python binary for \code{reticulate} +(passed to \code{\link[reticulate:use_python]{reticulate::use_python()}}). If \code{NULL} (default), use the system default.} \item{...}{Currently unused. Allows soaking up additional arguments @@ -41,21 +41,21 @@ to other methods.} } \value{ Character vector of file names containing raw, downloaded - data (invisibly) +data (invisibly) } \description{ -Link to [full data documentation](https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation). +Link to \href{https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation}{full data documentation}. } \details{ -Under the hood, this function uses the Python `cdsapi` module, -which can be installed via `pip` (`pip install --user cdsapi`). The -module is accessed via the `reticulate` package. +Under the hood, this function uses the Python \code{cdsapi} module, +which can be installed via \code{pip} (\verb{pip install --user cdsapi}). The +module is accessed via the \code{reticulate} package. Using the CDS API requires you to create a free account at https://cds.climate.copernicus.eu. Once you have done that, you will need to configure the CDS API on your local machine by -creating a `${HOME}/.cdsapi` file, as described -[here](https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key). +creating a \verb{$\{HOME\}/.cdsapi} file, as described +\href{https://cds.climate.copernicus.eu/api-how-to#install-the-cds-api-key}{here}. 
} \examples{ \dontrun{ diff --git a/modules/data.atmosphere/man/download.Fluxnet2015.Rd b/modules/data.atmosphere/man/download.Fluxnet2015.Rd index 80f1e105dfc..72c7f309ee4 100644 --- a/modules/data.atmosphere/man/download.Fluxnet2015.Rd +++ b/modules/data.atmosphere/man/download.Fluxnet2015.Rd @@ -28,6 +28,10 @@ The 'SITE_ID' field in \href{https://fluxnet.org/sites/site-list-and-pages/}{lis \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{username}{login name for Ameriflux} + +\item{...}{further arguments, currently ignored} } \description{ Download Fluxnet 2015 CSV files diff --git a/modules/data.atmosphere/man/download.FluxnetLaThuile.Rd b/modules/data.atmosphere/man/download.FluxnetLaThuile.Rd index d403f5ea0a2..218365ee2e7 100644 --- a/modules/data.atmosphere/man/download.FluxnetLaThuile.Rd +++ b/modules/data.atmosphere/man/download.FluxnetLaThuile.Rd @@ -30,6 +30,8 @@ The 'SITE_ID' field in \href{http://www.fluxdata.org/DataInfo/Dataset\%20Doc\%20 \item{verbose}{should the function be very verbose} \item{username}{should be the registered Fluxnet username, else defaults to pecan} + +\item{...}{further arguments, currently ignored} } \description{ Download Flxunet LaThuile CSV files diff --git a/modules/data.atmosphere/man/download.GFDL.Rd b/modules/data.atmosphere/man/download.GFDL.Rd index c1e5368e525..36f3260e0ef 100644 --- a/modules/data.atmosphere/man/download.GFDL.Rd +++ b/modules/data.atmosphere/man/download.GFDL.Rd @@ -40,6 +40,8 @@ the same name already exists?} \item{scenario}{Which scenario to run (options are rcp26, rcp45, rcp60, rcp85)} \item{ensemble_member}{Which ensemble_member to initialize the run (options are r1i1p1, r3i1p1, r5i1p1)} + +\item{...}{further arguments, currently ignored} } \description{ Download GFDL CMIP5 outputs for a single grid point using OPeNDAP and convert to CF diff --git a/modules/data.atmosphere/man/download.NARR_site.Rd 
b/modules/data.atmosphere/man/download.NARR_site.Rd index d310c6a2060..0cbdf407772 100644 --- a/modules/data.atmosphere/man/download.NARR_site.Rd +++ b/modules/data.atmosphere/man/download.NARR_site.Rd @@ -33,10 +33,15 @@ download.NARR_site( \item{verbose}{Turn on verbose output? Default=FALSE} +\item{progress}{Whether or not to show a progress bar. +Requires the `progress` package to be installed.} + \item{parallel}{Download in parallel? Default = TRUE} \item{ncores}{Number of cores for parallel download. Default is `parallel::detectCores()`} + +\item{...}{further arguments, currently ignored} } \description{ Download NARR time series for a single site diff --git a/modules/data.atmosphere/man/download.NEONmet.Rd b/modules/data.atmosphere/man/download.NEONmet.Rd index bf9708a9b7c..c8ffa061ae6 100644 --- a/modules/data.atmosphere/man/download.NEONmet.Rd +++ b/modules/data.atmosphere/man/download.NEONmet.Rd @@ -27,6 +27,8 @@ The 4-letter SITE code in \href{https://www.neonscience.org/science-design/fiel \item{overwrite}{should existing files be overwritten} \item{verbose}{makes the function output more text} + +\item{...}{further arguments, currently ignored} } \description{ download.NEONmet diff --git a/modules/data.atmosphere/man/extract.nc.ERA5.Rd b/modules/data.atmosphere/man/extract.nc.ERA5.Rd index 7dd24354abd..d377f131bd4 100644 --- a/modules/data.atmosphere/man/extract.nc.ERA5.Rd +++ b/modules/data.atmosphere/man/extract.nc.ERA5.Rd @@ -32,11 +32,13 @@ extract.nc.ERA5( \item{outfolder}{Path to directory where nc files need to be saved.} -\item{in.prefix}{initial portion of the filename that does not vary by date. Does not include directory; specify that as part of in.path.} +\item{in.prefix}{initial portion of the filename that does not vary by date. +Does not include directory; specify that as part of in.path.} \item{newsite}{site name.} -\item{vars}{variables to be extracted. 
If NULL all the variables will be returned.} +\item{vars}{variables to be extracted. If NULL all the variables will be +returned.} \item{overwrite}{Logical if files needs to be overwritten.} @@ -51,7 +53,8 @@ a list of xts objects with all the variables for the requested years ERA5_extract } \details{ -For the list of variables check out the documentation at \link{https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation#ERA5datadocumentation-Spatialgrid} +For the list of variables check out the documentation at \url{ + https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation#ERA5datadocumentation-Spatialgrid} } \examples{ \dontrun{ diff --git a/modules/data.atmosphere/man/extract.nc.Rd b/modules/data.atmosphere/man/extract.nc.Rd index 8fe057801f0..ef9a4868698 100644 --- a/modules/data.atmosphere/man/extract.nc.Rd +++ b/modules/data.atmosphere/man/extract.nc.Rd @@ -35,6 +35,8 @@ extract.nc( \item{overwrite}{should existing files be overwritten} \item{verbose}{should ouput of function be extra verbose} + +\item{...}{further arguments, currently ignored} } \description{ Given latitude and longitude coordinates, extract site data from NARR file diff --git a/modules/data.atmosphere/man/gen.subdaily.models.Rd b/modules/data.atmosphere/man/gen.subdaily.models.Rd index 872183e9514..3c77bc657c9 100644 --- a/modules/data.atmosphere/man/gen.subdaily.models.Rd +++ b/modules/data.atmosphere/man/gen.subdaily.models.Rd @@ -2,7 +2,7 @@ % Please edit documentation in R/tdm_generate_subdaily_models.R \name{gen.subdaily.models} \alias{gen.subdaily.models} -\title{gen.subdaily.models} +\title{Generate Subdaily Models} \usage{ gen.subdaily.models( outfolder, @@ -32,7 +32,7 @@ the subdaily cycle. If NULL, will default to all years} \item{direction.filter}{- Whether the model will be filtered backward or forward in time. 
options = c("backward", "forward") (PalEON will go backward, anybody interested in the future will go forward)} -\item{in.prefix}{} +\item{in.prefix}{not used} \item{n.beta}{- number of betas to save from linear regression model} @@ -55,6 +55,7 @@ for each day surrounding the model day} \item{print.progress}{- print progress bar? (gets passed through)} } \description{ +Create statistical models to predict subdaily meteorology This is the 2nd function in the tdm workflow that takes the dat.train_file that is created from the nc2dat.train function and generates "lag.days" and "next.days". These variables pass along information of the previous time step and provides a preview of the next time step. After these variables are created, @@ -64,10 +65,6 @@ This is the 2nd function in the tdm workflow that takes the dat.train_file that >100 GB. These will be called later in tdm_predict_subdaily_met to perform the linear regression analysis. } -\details{ -Generate Subdaily Models -Create statistical models to predict subdaily meteorology -} \seealso{ Other tdm - Temporally Downscale Meteorology: \code{\link{lm_ensemble_sims}()}, diff --git a/modules/data.atmosphere/man/lightME.Rd b/modules/data.atmosphere/man/lightME.Rd index 8bddbd3eeab..ad9042e79fc 100644 --- a/modules/data.atmosphere/man/lightME.Rd +++ b/modules/data.atmosphere/man/lightME.Rd @@ -21,7 +21,7 @@ lightME(lat = 40, DOY = 190, t.d = 12, t.sn = 12, atm.P = 1e+05, alpha = 0.85) } \value{ a \code{\link{list}} structure with components: -\itemize{ +\describe{ \item{'I.dir'}{Direct radiation (\eqn{\mu} mol \eqn{m^{-2}s^{-1}}} \item{'I.diff'}{Indirect (diffuse) radiation (\eqn{\mu} mol\eqn{m^{-2}s^{-1}}} \item{'cos.th'}{cosine of \eqn{\theta}, solar zenith angle.} diff --git a/modules/data.atmosphere/man/lm_ensemble_sims.Rd b/modules/data.atmosphere/man/lm_ensemble_sims.Rd index 72fc86a8fe6..18e9841e98f 100644 --- a/modules/data.atmosphere/man/lm_ensemble_sims.Rd +++ b/modules/data.atmosphere/man/lm_ensemble_sims.Rd 
@@ -30,6 +30,8 @@ lm_ensemble_sims( \item{direction.filter}{- Whether the model will be filtered backward or forward in time. options = c("backward", "forward") (PalEON will go backward, anybody interested in the future will go forward)} +\item{lags.list}{- optional list form of lags.init, with one entry for each unique `ens.day` in dat.mod} + \item{lags.init}{- a data frame of initialization parameters to match the data in dat.mod} \item{dat.train}{- the training data used to fit the model; needed for night/day in diff --git a/modules/data.atmosphere/man/merge_met_variable.Rd b/modules/data.atmosphere/man/merge_met_variable.Rd index c310eb7689b..66821bf6bb7 100644 --- a/modules/data.atmosphere/man/merge_met_variable.Rd +++ b/modules/data.atmosphere/man/merge_met_variable.Rd @@ -20,9 +20,7 @@ merge_met_variable( \item{in.prefix}{prefix of original data} -\item{start_date}{} - -\item{end_date}{} +\item{start_date, end_date}{date (or character in a standard date format). Only year component is used.} \item{merge.file}{path of file to be merged in} @@ -31,15 +29,12 @@ merge_met_variable( \item{verbose}{logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run?} -\item{...}{} +\item{...}{other arguments, currently ignored} } \value{ Currently nothing. TODO: Return a data frame summarizing the merged files. } \description{ -Merge a new met variable from an external file (e.g. CO2) into existing met files -} -\details{ Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. Currently unit and name checking only implemented for CO2. 
Currently does not yet support merge data that has lat/lon diff --git a/modules/data.atmosphere/man/met.process.Rd b/modules/data.atmosphere/man/met.process.Rd index 7aee712c518..d2a1641da01 100644 --- a/modules/data.atmosphere/man/met.process.Rd +++ b/modules/data.atmosphere/man/met.process.Rd @@ -35,6 +35,9 @@ met.process( \item{dir}{directory to write outputs to} +\item{browndog}{login info for the Browndog conversion service, if used. +List of `url`, `username`, `password`} + \item{spin}{spin-up settings passed to model-specific met2model. List containing nyear (number of years of spin-up), nsample (first n years to cycle), and resample (TRUE/FALSE)} \item{overwrite}{Whether to force met.process to proceed. diff --git a/modules/data.atmosphere/man/met2CF.ALMA.Rd b/modules/data.atmosphere/man/met2CF.ALMA.Rd index fde57db76b3..88481bd6e1d 100644 --- a/modules/data.atmosphere/man/met2CF.ALMA.Rd +++ b/modules/data.atmosphere/man/met2CF.ALMA.Rd @@ -26,6 +26,8 @@ met2CF.ALMA( \item{end_date}{the end date of the data to be downloaded (will only use the year part of the date)} \item{overwrite}{should existing files be overwritten} + +\item{verbose}{logical: enable verbose mode for netcdf writer functions?} } \description{ Get meteorology variables from ALMA netCDF files and convert to netCDF CF format diff --git a/modules/data.atmosphere/man/met2CF.Ameriflux.Rd b/modules/data.atmosphere/man/met2CF.Ameriflux.Rd index 0e054519cef..96bbf1c72ca 100644 --- a/modules/data.atmosphere/man/met2CF.Ameriflux.Rd +++ b/modules/data.atmosphere/man/met2CF.Ameriflux.Rd @@ -29,6 +29,8 @@ met2CF.Ameriflux( \item{overwrite}{should existing files be overwritten} \item{verbose}{should ouput of function be extra verbose} + +\item{...}{further arguments, currently ignored} } \description{ Get meteorology variables from Ameriflux L2 netCDF files and convert to netCDF CF format diff --git a/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd b/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd 
index b37b4c2478f..9ea8eeeca6b 100644 --- a/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd +++ b/modules/data.atmosphere/man/met2CF.AmerifluxLBL.Rd @@ -49,6 +49,8 @@ Units for datetime field are the lubridate function that will be used to parse t \item{overwrite}{should existing files be overwritten} \item{verbose}{should ouput of function be extra verbose} + +\item{...}{further arguments, currently ignored} } \description{ Get meteorology variables from Ameriflux LBL and convert to netCDF CF format diff --git a/modules/data.atmosphere/man/met2CF.PalEON.Rd b/modules/data.atmosphere/man/met2CF.PalEON.Rd index 6879078c28a..565c14cfff0 100644 --- a/modules/data.atmosphere/man/met2CF.PalEON.Rd +++ b/modules/data.atmosphere/man/met2CF.PalEON.Rd @@ -28,7 +28,13 @@ met2CF.PalEON( \item{end_date}{the end date of the data to be downloaded (will only use the year part of the date)} +\item{lat, lon}{site location in decimal degrees. Caution: both must have length one.} + \item{overwrite}{should existing files be overwritten} + +\item{verbose}{logical: enable verbose mode for netcdf writer functions?} + +\item{...}{further arguments, currently ignored} } \description{ Get meteorology variables from PalEON netCDF files and convert to netCDF CF format diff --git a/modules/data.atmosphere/man/met2CF.PalEONregional.Rd b/modules/data.atmosphere/man/met2CF.PalEONregional.Rd index 946032482ba..dcdad2904c3 100644 --- a/modules/data.atmosphere/man/met2CF.PalEONregional.Rd +++ b/modules/data.atmosphere/man/met2CF.PalEONregional.Rd @@ -27,6 +27,10 @@ met2CF.PalEONregional( \item{end_date}{the end date of the data to be downloaded (will only use the year part of the date)} \item{overwrite}{should existing files be overwritten} + +\item{verbose}{logical: enable verbose mode for netcdf writer functions?} + +\item{...}{further arguments, currently ignored} } \description{ Get meteorology variables from PalEON netCDF files and convert to netCDF CF format diff --git 
a/modules/data.atmosphere/man/metgapfill.NOAA_GEFS.Rd b/modules/data.atmosphere/man/metgapfill.NOAA_GEFS.Rd index 196a6394990..c03113329a8 100644 --- a/modules/data.atmosphere/man/metgapfill.NOAA_GEFS.Rd +++ b/modules/data.atmosphere/man/metgapfill.NOAA_GEFS.Rd @@ -29,17 +29,15 @@ metgapfill.NOAA_GEFS( \item{overwrite}{Whether or not to overwrite the output file if it exists or not} \item{verbose}{Passed to nc writing functions for additional output} + +\item{...}{further arguments, currently ignored} } \description{ -Gapfill NOAA_GEFS weather data -} -\section{Purpose}{ - -This function uses simple methods to gapfill NOAA GEFS met data -Temperature and Precipitation are gapfilled with spline; other data sources are gapfilled with -using linear models fitted to other fitted data. +This function uses simple methods to gapfill NOAA GEFS met data. +Temperature and Precipitation are gapfilled with splines; + other data sources are gapfilled using linear models fitted to + other fitted data. } - \author{ Luke Dramko } diff --git a/modules/data.atmosphere/man/metgapfill.Rd b/modules/data.atmosphere/man/metgapfill.Rd index 5c96abb1525..92507769909 100644 --- a/modules/data.atmosphere/man/metgapfill.Rd +++ b/modules/data.atmosphere/man/metgapfill.Rd @@ -36,6 +36,8 @@ metgapfill( \item{overwrite}{should existing files be overwritten} \item{verbose}{should the function be very verbose} + +\item{...}{further arguments, currently ignored} } \description{ Take an Ameriflux NetCDF file diff --git a/modules/data.atmosphere/man/model.train.Rd b/modules/data.atmosphere/man/model.train.Rd index eea0a3fec51..47e760df95b 100644 --- a/modules/data.atmosphere/man/model.train.Rd +++ b/modules/data.atmosphere/man/model.train.Rd @@ -9,12 +9,16 @@ model.train(dat.subset, v, n.beta, resids = resids, threshold = NULL, ...) 
\arguments{ \item{dat.subset}{data.frame containing lags, next, and downscale period data} +\item{v}{variable name, as character} + \item{n.beta}{number of betas to pull from} \item{resids}{TRUE or FALSE, whether to use residuals or not} \item{threshold}{NULL except for surface_downwelling_shortwave_radiation, helps with our distinction between day and night (no shortwave without sunlight)} + +\item{...}{further arguments, currently ignored} } \description{ Function to create linear regression models for specific met diff --git a/modules/data.atmosphere/man/nc.merge.Rd b/modules/data.atmosphere/man/nc.merge.Rd index 75329f45c33..dbed3d19330 100644 --- a/modules/data.atmosphere/man/nc.merge.Rd +++ b/modules/data.atmosphere/man/nc.merge.Rd @@ -33,6 +33,8 @@ nc.merge( \item{verbose}{logical: should \code{\link[ncdf4:ncdf4-package]{ncdf4}} functions print debugging information as they run?} + +\item{...}{further arguments, currently ignored} } \description{ This is the 1st function for the tdm (Temporally Downscale Meteorology) workflow. 
The nc2dat.train function diff --git a/modules/data.atmosphere/man/permute.nc.Rd b/modules/data.atmosphere/man/permute.nc.Rd index eeafd16fb10..6b276dbdf22 100644 --- a/modules/data.atmosphere/man/permute.nc.Rd +++ b/modules/data.atmosphere/man/permute.nc.Rd @@ -29,6 +29,8 @@ permute.nc( \item{overwrite}{should existing files be overwritten} \item{verbose}{should ouput of function be extra verbose} + +\item{...}{further arguments, currently ignored} } \description{ Permute netCDF files diff --git a/modules/data.atmosphere/man/predict_subdaily_met.Rd b/modules/data.atmosphere/man/predict_subdaily_met.Rd index 09a98b8ce0c..18453131757 100644 --- a/modules/data.atmosphere/man/predict_subdaily_met.Rd +++ b/modules/data.atmosphere/man/predict_subdaily_met.Rd @@ -60,6 +60,8 @@ ensemble rather than overwriting with a default naming scheme} \item{seed}{- manually set seed for results to be reproducible} \item{print.progress}{- print the progress bar?} + +\item{...}{further arguments, currently ignored} } \description{ This is the main function of the tdm family workflow. This function predicts subdaily meteorology diff --git a/modules/data.atmosphere/man/split_wind.Rd b/modules/data.atmosphere/man/split_wind.Rd index 21482185528..02747a03110 100644 --- a/modules/data.atmosphere/man/split_wind.Rd +++ b/modules/data.atmosphere/man/split_wind.Rd @@ -19,9 +19,7 @@ split_wind( \item{in.prefix}{prefix of original data} -\item{start_date}{} - -\item{end_date}{} +\item{start_date, end_date}{date (or character in a standard date format). Only year component is used.} \item{overwrite}{logical: replace output file if it already exists?} @@ -33,9 +31,6 @@ split_wind( nothing. TODO: Return data frame summarizing results } \description{ -Split wind_speed into eastward_wind and northward_wind -} -\details{ Currently modifies the files IN PLACE rather than creating a new copy of the files an a new DB record. 
} \examples{ diff --git a/modules/data.atmosphere/man/temporal.downscale.functions.Rd b/modules/data.atmosphere/man/temporal.downscale.functions.Rd index 668fe14e60a..654fc66d6d4 100644 --- a/modules/data.atmosphere/man/temporal.downscale.functions.Rd +++ b/modules/data.atmosphere/man/temporal.downscale.functions.Rd @@ -37,6 +37,8 @@ still being worked on, set to FALSE} \item{outfolder}{= where the output should be stored} \item{print.progress}{- print progress of model generation?} + +\item{...}{further arguments, currently ignored} } \description{ This function contains the functions that do the heavy lifting in gen.subdaily.models() diff --git a/modules/data.atmosphere/tests/Rcheck_reference.log b/modules/data.atmosphere/tests/Rcheck_reference.log index 855436fb1b1..e68a8cf8073 100644 --- a/modules/data.atmosphere/tests/Rcheck_reference.log +++ b/modules/data.atmosphere/tests/Rcheck_reference.log @@ -24,17 +24,13 @@ use conditionally. * checking installed package size ... OK * checking package directory ... OK * checking for future file timestamps ... OK -* checking DESCRIPTION meta-information ... OK +* checking DESCRIPTION meta-information ... NOTE +License stub is invalid DCF. * checking top-level files ... OK * checking for left-over files ... OK * checking index information ... OK * checking package subdirectories ... OK -* checking R files for non-ASCII characters ... WARNING -Found the following file with non-ASCII characters: - download.PalEON.R -Portable packages must use only ASCII characters in their R code, -except perhaps in comments. -Use \uxxxx escapes for other characters. +* checking R files for non-ASCII characters ... OK * checking R files for syntax errors ... OK * checking whether the package can be loaded ... OK * checking whether the package can be loaded with stated dependencies ... OK @@ -63,11 +59,7 @@ Undefined global functions or variables: * checking Rd files ... OK * checking Rd metadata ... OK * checking Rd line widths ... 
OK -* checking Rd cross-references ... WARNING -Missing link or links in documentation object 'extract.nc.ERA5.Rd': - ‘https://confluence.ecmwf.int/display/CKB/ERA5+data+documentation#ERA5datadocumentation-Spatialgrid’ - -See section 'Cross-references' in the 'Writing R Extensions' manual. +* checking Rd cross-references ... OK * checking for missing documentation entries ... WARNING Undocumented data sets: ‘FLUXNET.sitemap’ ‘cruncep_landmask’ ‘cruncep’ ‘ebifarm’ ‘narr’ @@ -76,94 +68,8 @@ All user-level objects in a package should have documentation entries. See chapter ‘Writing R documentation files’ in the ‘Writing R Extensions’ manual. * checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... WARNING -Undocumented arguments in documentation object 'closest_xy' - ‘slat’ ‘slon’ ‘infolder’ ‘infile’ - -Undocumented arguments in documentation object 'daygroup' - ‘date’ ‘flx’ - -Undocumented arguments in documentation object 'download.Ameriflux' - ‘...’ - -Undocumented arguments in documentation object 'download.AmerifluxLBL' - ‘...’ - -Undocumented arguments in documentation object 'download.Fluxnet2015' - ‘username’ ‘...’ - -Undocumented arguments in documentation object 'download.FluxnetLaThuile' - ‘...’ - -Undocumented arguments in documentation object 'download.GFDL' - ‘...’ - -Undocumented arguments in documentation object 'download.NARR_site' - ‘progress’ ‘...’ - -Undocumented arguments in documentation object 'download.NEONmet' - ‘...’ - -Undocumented arguments in documentation object 'extract.nc' - ‘...’ - -Undocumented arguments in documentation object 'lm_ensemble_sims' - ‘lags.list’ - -Undocumented arguments in documentation object 'met.process' - ‘browndog’ - -Undocumented arguments in documentation object 'met2CF.ALMA' - ‘verbose’ - -Undocumented arguments in documentation object 'met2CF.Ameriflux' - ‘...’ - -Undocumented arguments in documentation object 'met2CF.AmerifluxLBL' - ‘...’ - -Undocumented arguments in 
documentation object 'met2CF.PalEON' - ‘lat’ ‘lon’ ‘verbose’ ‘...’ - -Undocumented arguments in documentation object 'met2CF.PalEONregional' - ‘verbose’ ‘...’ - -Undocumented arguments in documentation object 'metgapfill.NOAA_GEFS' - ‘...’ - -Undocumented arguments in documentation object 'metgapfill' - ‘...’ - -Undocumented arguments in documentation object 'model.train' - ‘v’ ‘...’ - -Undocumented arguments in documentation object 'nc.merge' - ‘...’ - -Undocumented arguments in documentation object 'permute.nc' - ‘...’ - -Undocumented arguments in documentation object 'predict_subdaily_met' - ‘...’ - -Undocumented arguments in documentation object 'temporal.downscale.functions' - ‘...’ - -Functions with \usage entries need to have the appropriate \alias -entries, and all their arguments documented. -The \usage entries must correspond to syntactically valid R code. -See chapter ‘Writing R documentation files’ in the ‘Writing R -Extensions’ manual. -* checking Rd contents ... WARNING -Argument items with no description in Rd object 'gen.subdaily.models': - ‘in.prefix’ - -Argument items with no description in Rd object 'merge_met_variable': - ‘start_date’ ‘end_date’ ‘...’ - -Argument items with no description in Rd object 'split_wind': - ‘start_date’ ‘end_date’ - +* checking Rd \usage sections ... OK +* checking Rd contents ... OK * checking for unstated dependencies in examples ... OK * checking contents of ‘data’ directory ... OK * checking data for non-ASCII characters ... OK @@ -174,11 +80,7 @@ Argument items with no description in Rd object 'split_wind': old_size new_size compress cruncep_landmask.RData 39Kb 9Kb xz narr_cruncep_ebifarm.RData 790Kb 597Kb xz -* checking files in ‘vignettes’ ... WARNING -Files in the 'vignettes' directory but no files in 'inst/doc': - ‘ameriflux_demo.Rmd’, ‘cfmet_downscaling.Rmd’, - ‘compare_narr_cruncep_met.Rmd’, ‘tdm_downscaling.Rmd’ -Package has no Sweave vignette sources and no VignetteBuilder field. 
+* checking files in ‘vignettes’ ... OK * checking examples ... OK * checking for unstated dependencies in ‘tests’ ... OK * checking tests ... OK @@ -187,4 +89,4 @@ Package has no Sweave vignette sources and no VignetteBuilder field. * checking for non-standard things in the check directory ... OK * checking for detritus in the temp directory ... OK * DONE -Status: 7 WARNINGs, 2 NOTEs +Status: 2 WARNINGs, 3 NOTEs diff --git a/modules/data.atmosphere/vignettes/ameriflux_demo.Rmd b/modules/data.atmosphere/vignettes/ameriflux_demo.Rmd index 3d23e6e4a41..df568a83c15 100644 --- a/modules/data.atmosphere/vignettes/ameriflux_demo.Rmd +++ b/modules/data.atmosphere/vignettes/ameriflux_demo.Rmd @@ -2,7 +2,10 @@ title: "PEcAn: Importing Met data from Bondville, IL Ameriflux station" author: "David LeBauer" date: "4/28/2015" -output: html_document +output: html_vignette +vignette: > + %\VignetteIndexEntry{PEcAn: Importing Met data from Bondville, IL Ameriflux station} + %\VignetteEngine{knitr::rmarkdown} --- @@ -19,7 +22,7 @@ The PEcAn.data.atmosphere source code is in [`modules/data.atmosphere`](https:// ```{r} library(knitr) library(ggplot2) -library(ggthemes) +# library(ggthemes) library(PEcAn.data.atmosphere) ``` @@ -35,7 +38,7 @@ knitr::opts_chunk$set(message = FALSE, warnings = FALSE, cache = FALSE, ## Download Ameriflux data for Bondville -```{r download} +```{r download,eval=FALSE} download.Ameriflux(sitename = "US-Bo1", outfolder = "/tmp/", start_date = "1996-01-01", end_date = "2008-04-10") @@ -43,7 +46,7 @@ download.Ameriflux(sitename = "US-Bo1", outfolder = "/tmp/", ## Convert to PEcAn-CF format -```{r met2cf} +```{r met2cf, eval=FALSE} met2CF.Ameriflux(in.path = "/tmp/", in.prefix = "US-Bo1", outfolder = "/tmp/out/", start_date = "1996-01-01", end_date = "2008-04-10") @@ -63,14 +66,14 @@ system("ncrcat -O -h /tmp/out/US-Bo1.199[6789].nc /tmp/out/US-Bo1.200[12348678]. Using the `load.cfmet` convenience function. Ameriflux is provided at 30 min intervals. 
If needed at a finer resolution, see `?cfmet.downscale.time` (which works with subdaily and daily data). There is no `cfmet.upscale.time` function, but would be straightforward to implement if needed. -```{r load-data} +```{r load-data, eval=FALSE} bondville.nc <- nc_open("/tmp/out/US-Bo11996-2008.nc") bondville.cfmet <- load.cfmet(bondville.nc, lat = 40.0061988830566, lon = -88.290397644043, start.date = "1996-08-25", end.date = "2008-04-10")[!is.na(air_pressure)] ``` -```{r} +```{r, eval=FALSE} theme_set(theme_tufte()) p1 <- ggplot() + geom_line(data = bondville.cfmet, aes(x = date, y = surface_downwelling_shortwave_flux_in_air)) + ylab(paste(bondville.nc$var$surface_downwelling_shortwave_flux_in_air$longname, bondville.nc$var$surface_downwelling_shortwave_flux_in_air$units)) @@ -110,13 +113,13 @@ plots <- list(p1, p2, p3, p4, p5, p6, p7, p8, p9) ## Plot entire time series -```{r long-time, echo=FALSE} +```{r long-time, echo=FALSE, eval=FALSE} lapply(plots, print) ``` ## Plot 8-26-1996 to 10-14-1996 -```{r two-months, echo=FALSE} +```{r two-months, echo=FALSE, eval=FALSE} lapply(plots, function(x) x + xlim(ymd_hms(c("1996-08-26 18:29:00 UTC", "1996-10-14 18:29:00 UTC")))) ``` diff --git a/modules/data.atmosphere/vignettes/cfmet_downscaling.Rmd b/modules/data.atmosphere/vignettes/cfmet_downscaling.Rmd index 1c958ee8ae2..ce0851a2cd6 100644 --- a/modules/data.atmosphere/vignettes/cfmet_downscaling.Rmd +++ b/modules/data.atmosphere/vignettes/cfmet_downscaling.Rmd @@ -1,3 +1,11 @@ +--- +title: "Met Downscaling" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Met Downscaling} + %\VignetteEngine{knitr::rmarkdown} +--- + Met Downscaling =============== @@ -9,16 +17,16 @@ examples: * CRU-NCEP 6 hourly ### Extract - -```{r} +TODO: urbana_subdaily_test now lives in the test folder, not extdata +```{r, eval=FALSE} library(PEcAn.data.atmosphere) -subdaily.nc <- nc_open(system.file("extdata/urbana_subdaily_test.nc", package = "PEcAn.data.atmosphere")) +subdaily.nc 
<- ncdf4::nc_open(system.file("extdata/urbana_subdaily_test.nc", package = "PEcAn.data.atmosphere")) subdaily.cf <- load.cfmet(met.nc = subdaily.nc, lat = 39.75, lon = -87.25, start.date = "1979-01-01", end.date = "1979-06-30") ``` ### Downscale -```{r} +```{r, eval = FALSE} hourly.cf <- cfmet.downscale.time(cfmet = subdaily.cf) diff --git a/modules/data.atmosphere/vignettes/compare_narr_cruncep_met.Rmd b/modules/data.atmosphere/vignettes/compare_narr_cruncep_met.Rmd index 23487a535ee..efaeffb6f12 100644 --- a/modules/data.atmosphere/vignettes/compare_narr_cruncep_met.Rmd +++ b/modules/data.atmosphere/vignettes/compare_narr_cruncep_met.Rmd @@ -1,7 +1,17 @@ +--- +title: "Comparing met data from various sources" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Comparing met data from various sources} + %\VignetteEngine{knitr::rmarkdown} +--- Comparing met data from various sources ======================================================== +(All code chunks are set to eval=FALSE because vignette building was throwing errors. +TODO: Debug and re-enable all chunks) + ## Sources: * `ebifarm` local met station *Data* @@ -30,9 +40,9 @@ Comparing 'data' (ebifarm) with gridded products TODO: clean up figure titles, labels, write explanations -```{r loading-libraries} +```{r loading-libraries, eval=FALSE} library(PEcAn.data.atmosphere) -library(data.table) +# library(data.table) library(ggplot2) theme_set(theme_bw()) data(narr_cruncep_ebifarm) @@ -47,7 +57,7 @@ These data are on biocluster.igb.illinois.edu, most 10-100s GB. Scripts used to download and convert these data to PEcAn CF format, optimized for time series extraction, are on [GitHub ebimodeling/model-drivers](https://github.com/ebimodeling/model-drivers). 
-```sh +```{sh, eval=FALSE} mkdir ~/testmet/ ncks -O -d lon,-76.75,-76.25 -d lat,2.75,3.25 /home/groups/ebimodeling/met/narr/threehourly_32km/1979_2013.nc ~/testmet/narr32km_champaign.nc @@ -101,7 +111,7 @@ narr3h$source <- "narr3h" ebifarm$source <- "ebifarm" ``` -```{r reorder-met} +```{r reorder-met, eval=FALSE} met <- rbind(cruncep[,list(source, date, temp = DailyTemp.C, RH, wind = WindSpeed, precip, solar = solarR)], narr[,list(source, date, temp = Temp, RH, wind = WS, precip, solar = SolarR)], narr3h[,list(source, date, temp = DailyTemp.C, RH, wind = WindSpeed, precip, solar = solarR)], @@ -114,7 +124,7 @@ met$source <- factor(met$source, ### Solar Radiation (PAR) vs Temp -```{r solar-v-temp} +```{r solar-v-temp, eval=FALSE} ggplot() + geom_point(data = met, aes(solar, temp, color = month(date)), alpha = 0.1) + facet_wrap(~source, nrow=1) + @@ -127,7 +137,7 @@ ggplot() + geom_point(data = met, aes(solar, temp, color = month(date)), alpha = ### RH vs Temp -```{r RH-v-Temp} +```{r RH-v-Temp, eval=FALSE} ggplot() + geom_point(data = met, aes(RH, temp, color = month(date)), alpha = 0.1) + facet_wrap(~source, nrow=1) + @@ -137,7 +147,7 @@ ggplot() + geom_point(data = met, aes(RH, temp, color = month(date)), alpha = 0. 
### Solar Radiation and Precipitation: NARR daily vs 3 hourly -```{r par-v-precip} +```{r par-v-precip, eval=FALSE} ggplot() + geom_point(data = met[solar > 1 & precip > 0.1], aes(solar, precip, color = month(date)), alpha = 0.1) + facet_wrap(~source, nrow=1) + @@ -148,7 +158,7 @@ ggplot() + geom_point(data = met[solar > 1 & precip > 0.1], aes(solar, precip, c ### Precipitation v Temperature -```{r precip-v-temp} +```{r precip-v-temp, eval=FALSE} ggplot() + geom_point(data = met, aes(precip, temp, color = month(date)), alpha = 0.1) + facet_wrap(~source, nrow=1) + @@ -160,7 +170,7 @@ ggplot() + geom_point(data = met, aes(precip, temp, color = month(date)), alpha ### Compare Solar Radiation -```{r solar} +```{r solar, eval=FALSE} s <- met[,list(date, day = yday(date), solar, source )] s <- s[,list(date = min(date), solar = max(solar)), by = 'day,source'] @@ -182,7 +192,7 @@ ggplot() + geom_point(data = met[month(date) >5 & month(date)<9 & solar > 100], ### Max Solar Radiation for June 1-Aug31 2010 -```{r max-solar-plot} +```{r max-solar-plot, eval=FALSE} maxsolarplot <- ggplot() + geom_line(data = s, aes(date, solar, color = source)) + xlim(ymd("2010-06-01"), ymd("2010-08-31")) + ggtitle("Max Daily PAR") @@ -192,7 +202,7 @@ print(maxsolarplot) ### Max Solar Radiation (PAR) Model v OBS -```{r create-plots, fig.height = 3, fig.width = 12} +```{r create-plots, fig.height = 3, fig.width = 12, eval=FALSE} maxsolar <- allsolar[,list(obs=max(obs),cruncep=max(cruncep), narr = max(narr), narr3h=max(narr3h), date = min(date)), by = day] narrsolar <- ggplot() + geom_point(data = maxsolar, aes(obs, narr, color = month(date)), alpha = 0.3)+ scale_color_gradientn(colours = c("Red", "Orange", "Yellow", "Green", "Blue"))+ geom_line(aes(0:2000, 0:2000)) + xlim(c(0,2100)) + ylim(c(0,2100)) @@ -218,7 +228,7 @@ gridExtra::grid.arrange( ### PAR residuals (model - obs) -```{r solarresid-plot} +```{r solarresid-plot, eval=FALSE} solarresiduals <- ggplot(data=allsolar[narr+obs>100]) + 
geom_point(aes(date, narr - obs), alpha = 0.1, color = "blue") + @@ -237,7 +247,7 @@ print(solarresiduals) ### Correlations of daily max solar radiation -```{r maxsolar-plot} +```{r maxsolar-plot, eval = FALSE} library(GGally) ggpairs(maxsolar[,list(obs, narr3h, narr, cruncep)]) ``` @@ -247,13 +257,13 @@ ggpairs(maxsolar[,list(obs, narr3h, narr, cruncep)]) ### Compare daily and 3hourly downscaled NARR -```{r} +```{r, eval = FALSE} weachnarr_narr3h ``` ### Multiple variables -```{r all-vars-plots, fig.height = 15, fig.width = 10} +```{r all-vars-plots, fig.height = 15, fig.width = 10, eval = FALSE} ### Generate some plots to compare August rh <- ggplot() + @@ -283,16 +293,16 @@ print(gridExtra::grid.arrange(rh, precip, temp, wind, solar, ncol = 1)) * Temperature: -```{r results='markup'} +```{r results='markup', eval=FALSE} kable(met[,list(min = min(temp), mean = mean(temp), max = max(temp)), by = source]) ``` * RH -```{r results='markup'} +```{r results='markup', eval=FALSE} kable(met[,list(min = min(RH*100), mean = mean(RH*100), max = max(RH*100)), by = source]) ``` * Total Precip -```{r results='markup'} +```{r results='markup', eval = FALSE} kable(met[,list(total=sum(precip)), by = source]) ``` @@ -301,7 +311,7 @@ kable(met[,list(total=sum(precip)), by = source]) * need to print each one ... 
-```{r more-plots, fig.height = 15, fig.width = 10} +```{r more-plots, fig.height = 15, fig.width = 10, eval = FALSE} obs <- merge(met[!source == "ebifarm"], met[source == "ebifarm"], by = "date") obs$yday <- yday(obs$date) diff --git a/modules/data.atmosphere/vignettes/tdm_downscaling.Rmd b/modules/data.atmosphere/vignettes/tdm_downscaling.Rmd index 4b3624cd138..df283806fad 100644 --- a/modules/data.atmosphere/vignettes/tdm_downscaling.Rmd +++ b/modules/data.atmosphere/vignettes/tdm_downscaling.Rmd @@ -1,5 +1,10 @@ -Temporally Downscale Meteorology -=============== +--- +title: "Temporally Downscale Meteorology" +output: html_vignette +vignette: > + %\VignetteIndexEntry{Temporally Downscale Meteorology} + %\VignetteEngine{knitr::rmarkdown} +--- ### Subdaily Training Data @@ -11,7 +16,7 @@ Examples: ### Extract Training Data and Merge All Years Into 1 File -```{r} +```{r, eval=FALSE} library(PEcAn.data.atmosphere) library(PEcAn.DB) @@ -41,7 +46,7 @@ nc.merge(outfolder = file.path(outfolder, "training_data"), in.path = file.path( ### Generate Linear Regression Models From Training Data Note: This requires ~ 120 GB of space if using the entire training dataset -```{r} +```{r, eval=FALSE} in.prefix <- "US-NR1" dat.train.file <- "~/Example/training_data/FLX_US-NR1_FLUXNET2015_SUBSET_HH_1998-2014_1-3_dat.train.nc" n.beta <- 10 # Number of betas for the linear regression model to create, we'll choose 10 for time's sake @@ -58,7 +63,7 @@ Examples: ### Extract Data We Want To Downscale -```{r} +```{r, eval=FALSE} start_date <- "2020-01-01" end_date <- "2020-12-31" site_id <- 772 @@ -71,7 +76,7 @@ download.MACA(outfolder, start_date, end_date, site_id, model, scenario, ensembl ### Predict Subdaily Data Using Statistics From Training Data -```{r} +```{r, eval=FALSE} in.path <- "~/Example/MACA_site_0-772" in.prefix <- "MACA.BNU-ESM.rcp85.r1i1p1" # this is the data we are going to downscale lm.models.base <- "~/Example/lm_model_output" # where we stored the lm models From 
bf66a239ad4c03516859a8f5f1eaf047df5d3f1f Mon Sep 17 00:00:00 2001 From: Chris Black Date: Sun, 28 Jul 2024 20:42:43 -0700 Subject: [PATCH 2/8] Update modules/allometry/R/AllomAve.R --- modules/allometry/R/AllomAve.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/allometry/R/AllomAve.R b/modules/allometry/R/AllomAve.R index f6aba498caf..af297be4d7c 100644 --- a/modules/allometry/R/AllomAve.R +++ b/modules/allometry/R/AllomAve.R @@ -9,7 +9,7 @@ #' AllomAve #' -#' Allometery wrapper function that handles loading and subsetting the data, +#' Allometry wrapper function that handles loading and subsetting the data, #' fitting the Bayesian models, and generating diagnostic figures. Set up to loop over #' multiple PFTs and components. #' Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. From 15b7e56736c2fb17c5958f6ae1009907e4c33e32 Mon Sep 17 00:00:00 2001 From: Chris Black Date: Sun, 28 Jul 2024 20:46:20 -0700 Subject: [PATCH 3/8] Update modules/allometry/man/AllomAve.Rd --- modules/allometry/man/AllomAve.Rd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/allometry/man/AllomAve.Rd b/modules/allometry/man/AllomAve.Rd index 8daa36d9453..7d9fc911bab 100644 --- a/modules/allometry/man/AllomAve.Rd +++ b/modules/allometry/man/AllomAve.Rd @@ -48,7 +48,7 @@ Default is stem biomass (6). See data(allom.components)} nested list of parameter summary statistics } \description{ -Allometery wrapper function that handles loading and subsetting the data, +Allometry wrapper function that handles loading and subsetting the data, fitting the Bayesian models, and generating diagnostic figures. Set up to loop over multiple PFTs and components. Writes raw MCMC and PDF of diagnositcs to file and returns table of summary stats. 
From 8128ae18d363e51d1071f776a8d76ec552334b9f Mon Sep 17 00:00:00 2001 From: Chris Black Date: Sun, 28 Jul 2024 21:00:21 -0700 Subject: [PATCH 4/8] deps --- docker/depends/pecan_package_dependencies.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/depends/pecan_package_dependencies.csv b/docker/depends/pecan_package_dependencies.csv index 54bde494f59..6bb9f38974b 100644 --- a/docker/depends/pecan_package_dependencies.csv +++ b/docker/depends/pecan_package_dependencies.csv @@ -123,6 +123,7 @@ "jsonlite","*","modules/data.atmosphere","Imports",FALSE "jsonlite","*","modules/data.remote","Suggests",FALSE "knitr","*","base/visualization","Suggests",FALSE +"knitr","*","modules/data.atmosphere","Suggests",FALSE "knitr",">= 1.42","base/db","Suggests",FALSE "knitr",">= 1.42","base/qaqc","Suggests",FALSE "knitr",">= 1.42","modules/allometry","Suggests",FALSE From 222bd80dfc526bec9f688b8defbbbe197e67d88a Mon Sep 17 00:00:00 2001 From: Chris Black Date: Mon, 29 Jul 2024 02:26:01 -0700 Subject: [PATCH 5/8] knitr vignettes need rmarkdown --- .github/workflows/ci-weekly.yml | 3 +++ .github/workflows/depends.yml | 10 +++++++--- base/visualization/DESCRIPTION | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-weekly.yml b/.github/workflows/ci-weekly.yml index 18f16ff8bd5..36bdebe6950 100644 --- a/.github/workflows/ci-weekly.yml +++ b/.github/workflows/ci-weekly.yml @@ -13,6 +13,7 @@ jobs: fail-fast: false matrix: R: + - "4.3" - "devel" uses: ./.github/workflows/test.yml with: @@ -24,6 +25,7 @@ jobs: fail-fast: false matrix: R: + - "4.3" - "devel" uses: ./.github/workflows/check.yml with: @@ -36,6 +38,7 @@ jobs: fail-fast: false matrix: R: + - "4.3" - "devel" uses: ./.github/workflows/sipnet.yml with: diff --git a/.github/workflows/depends.yml b/.github/workflows/depends.yml index 7b5bde58c89..35070426540 100644 --- a/.github/workflows/depends.yml +++ b/.github/workflows/depends.yml @@ -31,6 +31,7 @@ jobs: - "4.1" - "4.2" - "4.3" + 
- "4.4" - "devel" steps: @@ -49,11 +50,14 @@ # calculate some variables that are used later - name: github branch - # build Rdevel only on Mondays, others every day (but not twice on Mondays) + # build weekly-tested versions only on Mondays, others every day + # (but not twice on Mondays) if: | github.event_name == 'workflow_dispatch' || - (matrix.R != 'devel' && github.event.schedule == '0 0 * * *') || - (matrix.R == 'devel' && github.event.schedule == '30 1 * * 1') + (contains(fromJSON('["4.1", "4.2", "4.4"]'), matrix.R) + && github.event.schedule == '0 0 * * *') || + (contains(fromJSON('["4.3", "devel"]'), matrix.R) + && github.event.schedule == '30 1 * * 1') run: | BRANCH=${GITHUB_REF##*/} echo "GITHUB_BRANCH=${BRANCH}" >> $GITHUB_ENV diff --git a/base/visualization/DESCRIPTION b/base/visualization/DESCRIPTION index c7f735f435f..136d5a9dbbe 100644 --- a/base/visualization/DESCRIPTION +++ b/base/visualization/DESCRIPTION @@ -42,6 +42,7 @@ Suggests: mockery, png, raster, + rmarkdown, sp, testthat (>= 1.0.2), withr From 828f312d2e6d60d56af16fee92ee338365fa6386 Mon Sep 17 00:00:00 2001 From: Chris Black Date: Mon, 29 Jul 2024 03:06:50 -0700 Subject: [PATCH 6/8] depends --- docker/depends/pecan_package_dependencies.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/depends/pecan_package_dependencies.csv b/docker/depends/pecan_package_dependencies.csv index 6bb9f38974b..c4ff7caaa45 100644 --- a/docker/depends/pecan_package_dependencies.csv +++ b/docker/depends/pecan_package_dependencies.csv @@ -483,6 +483,7 @@ "rlang","*","modules/uncertainty","Imports",FALSE "rlang",">= 0.2.0","modules/data.atmosphere","Imports",FALSE "rlist","*","modules/assim.sequential","Suggests",FALSE +"rmarkdown","*","base/visualization","Suggests",FALSE "rmarkdown",">= 2.19","base/db","Suggests",FALSE "rmarkdown",">= 2.19","base/qaqc","Suggests",FALSE "rmarkdown",">= 2.19","modules/allometry","Suggests",FALSE From db889819055dfca6a84592810a1dc0952acaec44 Mon Sep 17 00:00:00 
2001 From: Chris Black Date: Mon, 29 Jul 2024 09:30:28 -0700 Subject: [PATCH 7/8] fix Roxygen breakage from sneaky curly apostrophe --- modules/assim.sequential/R/sda.enkf.R | 4 +- .../assim.sequential/man/sda.enkf.original.Rd | 43 +++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 modules/assim.sequential/man/sda.enkf.original.Rd diff --git a/modules/assim.sequential/R/sda.enkf.R b/modules/assim.sequential/R/sda.enkf.R index 3dff5d07c79..20f1674d034 100644 --- a/modules/assim.sequential/R/sda.enkf.R +++ b/modules/assim.sequential/R/sda.enkf.R @@ -1,6 +1,6 @@ ##' State Variable Data Assimilation: Ensemble Kalman Filter -##’ -##’ Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), +##' +##' Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), ##' a new workflow folder is created and the previous forecast for the start_time is copied over. ##' During restart the initial run before the loop is skipped, with the info being populated from the previous run. ##' The function then dives right into the first Analysis, then continues on like normal. 
diff --git a/modules/assim.sequential/man/sda.enkf.original.Rd b/modules/assim.sequential/man/sda.enkf.original.Rd new file mode 100644 index 00000000000..6b59849a8a3 --- /dev/null +++ b/modules/assim.sequential/man/sda.enkf.original.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sda.enkf.R +\name{sda.enkf.original} +\alias{sda.enkf.original} +\title{State Variable Data Assimilation: Ensemble Kalman Filter} +\usage{ +sda.enkf.original( + settings, + obs.mean, + obs.cov, + IC = NULL, + Q = NULL, + adjustment = TRUE, + restart = NULL +) +} +\arguments{ +\item{settings}{PEcAn settings object} + +\item{obs.mean}{list of observations of the means of state variable (time X nstate)} + +\item{obs.cov}{list of observations of covariance matrices of state variables (time X nstate X nstate)} + +\item{IC}{initial conditions} + +\item{Q}{process covariance matrix given if there is no data to estimate it} + +\item{adjustment}{flag for using ensemble adjustment filter or not} + +\item{restart}{Used for iterative updating previous forecasts. This is a list that includes ens.inputs, the list of inputs by ensemble member, params, the parameters, and old_outdir, the output directory from the previous workflow. These three things are needed to ensure that if a new workflow is started that ensemble members keep there run-specific met and params. See Details} +} +\value{ +NONE +} +\description{ +Restart mode: Basic idea is that during a restart (primary case envisioned as an iterative forecast), + a new workflow folder is created and the previous forecast for the start_time is copied over. +During restart the initial run before the loop is skipped, with the info being populated from the previous run. +The function then dives right into the first Analysis, then continues on like normal. 
+} +\author{ +Michael Dietze and Ann Raiho \email{dietze@bu.edu} +} From 2481f31c70dc25e4fa4a6e66ea79e7f10aca9301 Mon Sep 17 00:00:00 2001 From: Chris Black Date: Mon, 29 Jul 2024 09:52:57 -0700 Subject: [PATCH 8/8] typo --- modules/allometry/R/allom.predict.R | 2 +- modules/allometry/man/allom.predict.Rd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/allometry/R/allom.predict.R b/modules/allometry/R/allom.predict.R index 5dc032caa9f..2423fc283db 100644 --- a/modules/allometry/R/allom.predict.R +++ b/modules/allometry/R/allom.predict.R @@ -25,7 +25,7 @@ #' @param n Number of Monte Carlo samples. Defaults to the same number as in the MCMC object #' @param use c('Bg','mu','best') #' @param interval c('none','confidence','prediction') default is prediction -#' @param single.tree logical: Is this a DBH time series from one indidual tree? +#' @param single.tree logical: Is this a DBH time series from one individual tree? #' If TRUE, will use a fixed error for all draws. #' #' @return matrix of Monte Carlo predictions that has n rows and one column per DBH diff --git a/modules/allometry/man/allom.predict.Rd b/modules/allometry/man/allom.predict.Rd index 0c11064a542..031510f2504 100644 --- a/modules/allometry/man/allom.predict.Rd +++ b/modules/allometry/man/allom.predict.Rd @@ -36,7 +36,7 @@ Can be NULL if only one PFT/species exists, otherwise needs to the same length a \item{interval}{c('none','confidence','prediction') default is prediction} -\item{single.tree}{logical: Is this a DBH time series from one indidual tree? +\item{single.tree}{logical: Is this a DBH time series from one individual tree? If TRUE, will use a fixed error for all draws.} } \value{