diff --git a/doc/sphinx/source/recipes/figures/benchmarking/annual_cycle.png b/doc/sphinx/source/recipes/figures/benchmarking/annual_cycle.png new file mode 100644 index 0000000000..9836b2f6f0 Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/annual_cycle.png differ diff --git a/doc/sphinx/source/recipes/figures/benchmarking/boxplots.png b/doc/sphinx/source/recipes/figures/benchmarking/boxplots.png new file mode 100644 index 0000000000..247d42065a Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/boxplots.png differ diff --git a/doc/sphinx/source/recipes/figures/benchmarking/diurnal_cycle.png b/doc/sphinx/source/recipes/figures/benchmarking/diurnal_cycle.png new file mode 100644 index 0000000000..27ad9913bc Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/diurnal_cycle.png differ diff --git a/doc/sphinx/source/recipes/figures/benchmarking/map.png b/doc/sphinx/source/recipes/figures/benchmarking/map.png new file mode 100644 index 0000000000..80be821c1c Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/map.png differ diff --git a/doc/sphinx/source/recipes/figures/benchmarking/timeseries.png b/doc/sphinx/source/recipes/figures/benchmarking/timeseries.png new file mode 100644 index 0000000000..4cb5e0ea23 Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/timeseries.png differ diff --git a/doc/sphinx/source/recipes/figures/benchmarking/zonal.png b/doc/sphinx/source/recipes/figures/benchmarking/zonal.png new file mode 100644 index 0000000000..52a3938620 Binary files /dev/null and b/doc/sphinx/source/recipes/figures/benchmarking/zonal.png differ diff --git a/doc/sphinx/source/recipes/figures/model_evaluation/diurnal_cycle_clt_sepacific_3hr.png b/doc/sphinx/source/recipes/figures/model_evaluation/diurnal_cycle_clt_sepacific_3hr.png new file mode 100644 index 0000000000..35bee589eb Binary files /dev/null and b/doc/sphinx/source/recipes/figures/model_evaluation/diurnal_cycle_clt_sepacific_3hr.png differ diff --git a/doc/sphinx/source/recipes/figures/monitor/diurnal_cycle_clt_tropics_3hr.png b/doc/sphinx/source/recipes/figures/monitor/diurnal_cycle_clt_tropics_3hr.png new file mode 100644 index 0000000000..c91f757036 Binary files /dev/null and b/doc/sphinx/source/recipes/figures/monitor/diurnal_cycle_clt_tropics_3hr.png differ diff --git a/doc/sphinx/source/recipes/figures/monitor/diurnalcycle_pr_tropics_EC-Earth3_3hr_historical_r1i1p1f1.png b/doc/sphinx/source/recipes/figures/monitor/diurnalcycle_pr_tropics_EC-Earth3_3hr_historical_r1i1p1f1.png new file mode 100644 index 0000000000..1e2a77cec8 Binary files /dev/null and b/doc/sphinx/source/recipes/figures/monitor/diurnalcycle_pr_tropics_EC-Earth3_3hr_historical_r1i1p1f1.png differ diff --git a/doc/sphinx/source/recipes/index.rst b/doc/sphinx/source/recipes/index.rst index 0f0ce7667d..68c0b18d80 100644 --- a/doc/sphinx/source/recipes/index.rst +++ b/doc/sphinx/source/recipes/index.rst @@ -21,6 +21,7 @@ large variety of input data. .. toctree:: :maxdepth: 1 + recipe_benchmarking recipe_model_evaluation recipe_monitor recipe_psyplot diff --git a/doc/sphinx/source/recipes/recipe_benchmarking.rst b/doc/sphinx/source/recipes/recipe_benchmarking.rst new file mode 100644 index 0000000000..4ae0ca9907 --- /dev/null +++ b/doc/sphinx/source/recipes/recipe_benchmarking.rst @@ -0,0 +1,140 @@ +.. 
_recipe_benchmarking: + +Model Benchmarking +================== + +Overview +-------- + +These recipes and diagnostics are based on :ref:`recipe_monitor`, which allows plotting arbitrary preprocessor output, i.e., arbitrary variables from arbitrary datasets. An extension of these diagnostics is used to benchmark a model simulation against other datasets (e.g. CMIP6). The benchmarking features are described in `Lauer et al.`_. + +.. _`Lauer et al.`: Lauer, A., Bock, L., Hassler, B., Jöckel, P., Ruhe, L., and Schlund, M.: Monitoring and benchmarking Earth System Model simulations with ESMValTool v2.12.0, Geosci. Model Dev. (submitted). + +Available recipes and diagnostics +--------------------------------- + +Recipes are stored in `recipes/model_evaluation` + +* recipe_model_benchmarking_annual_cycle.yml +* recipe_model_benchmarking_boxplots.yml +* recipe_model_benchmarking_diurnal_cycle.yml +* recipe_model_benchmarking_maps.yml +* recipe_model_benchmarking_timeseries.yml +* recipe_model_benchmarking_zonal.yml + +Diagnostics are stored in `diag_scripts/monitor/` + +* :ref:`multi_datasets.py + <api.esmvaltool.diag_scripts.monitor.multi_datasets>`: + Monitoring diagnostic to show multiple datasets in one plot (incl. biases). + + +Recipe settings +~~~~~~~~~~~~~~~ + +See :ref:`multi_datasets.py <api.esmvaltool.diag_scripts.monitor.multi_datasets>` for a list of all possible configuration options that can be specified in the recipe. + +.. note:: + Exactly one dataset (the dataset to be benchmarked) needs to specify the facet ``benchmark_dataset: True`` in its dataset entry of the recipe. For line plots (i.e. annual cycle, seasonal cycle, diurnal cycle, time series), it is recommended to specify a particular line color and line style in the ``scripts`` section of the recipe for the dataset to be benchmarked (``benchmark_dataset: True``) so that this dataset is easy to identify in the plot. In the example below, MIROC6 is the dataset to be benchmarked and ERA5 is used as a reference dataset. + +.. code-block:: yaml + + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + plot_filename: '{plot_type}_{real_name}_{mip}' + group_variables_by: variable_group + facet_used_for_labels: alias + plots: + diurnal_cycle: + annual_mean_kwargs: False + legend_kwargs: + loc: upper right + plot_kwargs: + 'MIROC6': + color: red + label: '{alias}' + linestyle: '-' + linewidth: 2 + zorder: 4 + ERA5: + color: black + label: '{dataset}' + linestyle: '-' + linewidth: 2 + zorder: 3 + MultiModelPercentile10: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + MultiModelPercentile90: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + default: + color: lightgray + label: null + linestyle: '-' + linewidth: 1 + zorder: 1
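A matching ``datasets`` and ``preprocessors`` section could look like the following minimal sketch. It only illustrates the benchmarking-specific facets ``benchmark_dataset`` and ``reference_for_metric`` and a multi-model percentile preprocessor; the preprocessor name, the exact preprocessor settings, and the dataset entries are illustrative assumptions and are not taken from the shipped recipes:

.. code-block:: yaml

   preprocessors:
     benchmarking_diurnal_cycle:  # hypothetical preprocessor name
       area_statistics:
         operator: mean
       climate_statistics:
         operator: mean
         period: hour
       multi_model_statistics:  # provides MultiModelPercentile10/90
         span: overlap
         statistics:
           - operator: percentile
             percent: 10
           - operator: percentile
             percent: 90
         exclude: [ERA5]

   datasets:
     # illustrative entries only
     - {dataset: MIROC6, project: CMIP6, exp: historical, ensemble: r1i1p1f1, grid: gn, benchmark_dataset: true}
     - {dataset: MPI-ESM1-2-LR, project: CMIP6, exp: historical, ensemble: r1i1p1f1, grid: gn}
     - {dataset: ERA5, project: native6, type: reanaly, version: v1, tier: 3, reference_for_metric: true}

The CMIP6 entries other than the benchmarked dataset form the ensemble from which the percentiles (and thus the shaded ranges in the plots) are computed; see the shipped ``recipe_model_benchmarking_*.yml`` recipes for complete, tested configurations.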
+ +Variables +--------- + +Any, but the variables' number of dimensions should match the ones expected by each plot. + +References +---------- + +* Lauer, A., L. Bock, B. Hassler, P. Jöckel, L. Ruhe, and M. Schlund: Monitoring and benchmarking Earth System Model simulations with ESMValTool v2.12.0, Geosci. Model Dev., xx, xxxx-xxxx, + doi: xxx, 202x. + +Example plots +------------- + +.. _fig_benchmarking_annual_cycle: +.. figure:: /recipes/figures/benchmarking/annual_cycle.png + :align: center + :width: 16cm + +(Left) Multi-year global mean (2000-2004) of the seasonal cycle of near-surface temperature in K from a simulation of MIROC6 and the reference dataset HadCRUT5 (black). The thin gray lines show individual CMIP6 models used for comparison; the dashed gray lines show the 10% and 90% percentiles of these CMIP6 models. (Right) Same as (left), but for the area-weighted RMSE of near-surface temperature. The light blue shading shows the range of the 10% to 90% percentiles of RMSE values from the ensemble of CMIP6 models used for comparison. Created with recipe_model_benchmarking_annual_cycle.yml. + +.. _fig_benchmarking_boxplots: +.. figure:: /recipes/figures/benchmarking/boxplots.png + :align: center + :width: 16cm + +(Left) Global area-weighted RMSE (smaller=better), (middle) weighted Pearson’s correlation coefficient (higher=better) and (right) weighted Earth mover’s distance (smaller=better) of the geographical pattern of 5-year means of different variables from a simulation of MIROC6 (red cross) in comparison to the CMIP6 ensemble (boxplot). Reference datasets for calculating the three metrics are: near-surface temperature (tas): HadCRUT5, surface temperature (ts): HadISST, precipitation (pr): GPCP-SG, air pressure at sea level (psl): ERA5, shortwave (rsut) and longwave (rlut) radiative fluxes at TOA as well as shortwave (swcre) and longwave (lwcre) cloud radiative effects: CERES-EBAF. Each box indicates the range from the first quartile to the third quartile, the vertical lines show the median, and the whiskers the minimum and maximum values, excluding the outliers. Outliers are defined as being outside 1.5 times the interquartile range. Created with recipe_model_benchmarking_boxplots.yml. + +.. _fig_benchmarking_diurn_cycle: +.. figure:: /recipes/figures/benchmarking/diurnal_cycle.png + :align: center + :width: 10cm + +Area-weighted RMSE of the annual mean diurnal cycle (year 2000) of precipitation averaged over the tropical ocean (ocean grid cells in the latitude belt 30°S to 30°N) from a simulation of MIROC6 compared with ERA5 data (black). The light blue shading shows the range of the 10% to 90% percentiles of RMSE values from the ensemble of CMIP6 models used for comparison. Created with recipe_model_benchmarking_diurnal_cycle.yml. + +.. _fig_benchmarking_map: +.. figure:: /recipes/figures/benchmarking/map.png + :align: center + :width: 10cm + +5-year annual mean (2000-2004) area-weighted RMSE of the precipitation rate in mm day-1 from a simulation of MIROC6 compared with GPCP-SG data. The stippled areas mask grid cells where the RMSE is smaller than the 90% percentile of RMSE values from an ensemble of CMIP6 models. Created with recipe_model_benchmarking_maps.yml. + +.. _fig_benchmarking_timeseries: +.. figure:: /recipes/figures/benchmarking/timeseries.png + :align: center + :width: 16cm + +(Left) Time series from 2000 through 2014 of global average monthly mean anomalies (reference period 2000-2009) of near-surface temperature in K from a simulation of MIROC6 (red) and the reference dataset HadCRUT5 (black). The thin gray lines show individual CMIP6 models used for comparison; the dashed gray lines show the 10% and 90% percentiles of these CMIP6 models. (Right) Same as (left), but for the area-weighted RMSE of the near-surface air temperature. The light blue shading shows the range of the 10% to 90% percentiles of RMSE values from the ensemble of CMIP6 models used for comparison. Created with recipe_model_benchmarking_timeseries.yml.
+ +.. _fig_benchmarking_zonal: +.. figure:: /recipes/figures/benchmarking/zonal.png + :align: center + :width: 10cm + +5-year annual mean bias (2000-2004) of the zonally averaged temperature in K from a historical simulation of MIROC6 compared with ERA5 reanalysis data. The stippled areas mask grid cells where the absolute bias (:math:`|BIAS|`) is smaller than the maximum of the absolute 10% (:math:`|p10|`) and the absolute 90% (:math:`|p90|`) percentiles from an ensemble of CMIP6 models; unstippled grid cells thus satisfy :math:`|BIAS| \geq \max(|p10|, |p90|)`. Created with recipe_model_benchmarking_zonal.yml. diff --git a/doc/sphinx/source/recipes/recipe_model_evaluation.rst b/doc/sphinx/source/recipes/recipe_model_evaluation.rst index 9e199815e0..60ac1ac1c6 100644 --- a/doc/sphinx/source/recipes/recipe_model_evaluation.rst +++ b/doc/sphinx/source/recipes/recipe_model_evaluation.rst @@ -96,3 +96,10 @@ Zonal mean precipitation. :width: 14cm Annual cycle of Southern Ocean total cloud cover. + +.. _fig_6: +.. figure:: /recipes/figures/model_evaluation/diurnal_cycle_clt_sepacific_3hr.png + :align: center + :width: 14cm + +Diurnal cycle of Southeast Pacific total cloud cover. diff --git a/doc/sphinx/source/recipes/recipe_monitor.rst b/doc/sphinx/source/recipes/recipe_monitor.rst index ee3b9b44fa..8ed0b1b4f6 100644 --- a/doc/sphinx/source/recipes/recipe_monitor.rst +++ b/doc/sphinx/source/recipes/recipe_monitor.rst @@ -189,6 +189,18 @@ Timeseries of tas including a reference dataset. Annual cycle of tas including a reference dataset. +.. _fig_diurnal_cycle: +.. figure:: /recipes/figures/monitor/diurnalcycle_pr_tropics_EC-Earth3_3hr_historical_r1i1p1f1.png + :align: center + :width: 14cm + +.. _fig_diurnal_cycle_with_ref: +.. figure:: /recipes/figures/monitor/diurnal_cycle_clt_tropics_3hr.png + :align: center + :width: 14cm + +Diurnal cycle of clt including a reference dataset. + .. _fig_map_with_ref: .. figure:: /recipes/figures/monitor/map_with_ref.png :align: center diff --git a/esmvaltool/config-references.yml b/esmvaltool/config-references.yml index b5f43bc911..e1b45bf049 100644 --- a/esmvaltool/config-references.yml +++ b/esmvaltool/config-references.yml @@ -774,6 +774,7 @@ projects: crescendo: EU H2020 project CRESCENDO dlrveu2: DLR project VEU2 dlrveu: DLR project VEU + dlrmabak: DLR project MABAK embrace: EU FP7 project EMBRACE esm2025: EU H2020 project ESM2025 - Earth system models for the future esmval: DLR project ESMVal diff --git a/esmvaltool/diag_scripts/clouds/clouds.ncl b/esmvaltool/diag_scripts/clouds/clouds.ncl index c05c091cf4..d3d1ef3b8e 100644 --- a/esmvaltool/diag_scripts/clouds/clouds.ncl +++ b/esmvaltool/diag_scripts/clouds/clouds.ncl @@ -114,7 +114,8 @@ begin variables = metadata_att_as_array(variable_info, "short_name") if (.not. any(variables .eq. var0)) then - errstr = "diagnostic " + diag + " requires the following variable: " + var0 + errstr = "diagnostic " + DIAG_SCRIPT \ + + " requires the following variable: " + var0 error_msg("f", DIAG_SCRIPT, "", errstr) end if @@ -539,6 +540,10 @@ begin res@cnLevels = ispan(0, 60, 5) end if + if (var0.eq."ts") then + res@cnLevels = ispan(274, 304, 2) + end if + ; res@lbLabelBarOn = False res@gsnRightString = "" diff --git a/esmvaltool/diag_scripts/monitor/monitor.py b/esmvaltool/diag_scripts/monitor/monitor.py index 59e37b9842..aca59d18e2 100644 --- a/esmvaltool/diag_scripts/monitor/monitor.py +++ b/esmvaltool/diag_scripts/monitor/monitor.py @@ -27,6 +27,11 @@ produce multi panel plots for data with `shape_id` or `region` coordinates of length > 1.
Supported coordinates: `time`, `shape_id` (optional) and `region` (optional). + - Diurnal cycle (plot type ``diurnal_cycle``): Generate a diurnal cycle + plot (timeseries like climatological from 0 to 24 hours). It will + produce multi panel plots for data with `shape_id` or `region` + coordinates of length > 1. Supported coordinates: `time`, `shape_id` + (optional) and `region` (optional). Configuration options in recipe ------------------------------- @@ -39,10 +44,10 @@ monitor configuration file can be found :ref:`here `. plots: dict, optional Plot types plotted by this diagnostic (see list above). Dictionary keys - must be ``clim``, ``seasonclim``, ``monclim``, ``timeseries`` or - ``annual_cycle``. Dictionary values are dictionaries used as options for - the corresponding plot. The allowed options for the different plot types - are given below. + must be ``clim``, ``seasonclim``, ``monclim``, ``timeseries``, + ``annual_cycle`` or ``diurnal_cycle``. Dictionary values are dictionaries + used as options for the corresponding plot. The allowed options for the + different plot types are given below. plot_filename: str, optional Filename pattern for the plots. Defaults to ``{plot_type}_{real_name}_{dataset}_{mip}_{exp}_{ensemble}``. @@ -98,6 +103,10 @@ ---------------------------------------------------- None +Configuration options for plot type ``diurnal_cycle`` +----------------------------------------------------- +None + .. hint:: Extra arguments given to the recipe are ignored, so it is safe to use yaml @@ -166,6 +175,7 @@ def compute(self): self.timeseries(cube, var_info) self.plot_annual_cycle(cube, var_info) + self.plot_diurnal_cycle(cube, var_info) self.plot_monthly_climatology(cube, var_info) self.plot_seasonal_climatology(cube, var_info) self.plot_climatology(cube, var_info) @@ -280,6 +290,57 @@ def plot_annual_cycle(self, cube, var_info): caption=caption, ) + def plot_diurnal_cycle(self, cube, var_info): + """Plot the diurnal cycle according to configuration. + + The key 'diurnal_cycle' must be passed to the 'plots' option in the + configuration. + + Parameters + ---------- + cube: iris.cube.Cube + Data to plot. Must be 1D with time or 2D with an extra 'shape_id' + or 'region' coordinate. 
In that case, a multi-panel plot with one panel for each region is created. + var_info: dict + Variable's metadata from ESMValTool + + Warning + ------- + The hourly climatology is computed inside this function, so users can + plot both the timeseries and the diurnal cycle in one go. + """ + if 'diurnal_cycle' not in self.plots: + return + cube = climate_statistics(cube, period='hour') + + plotter = PlotSeries() + plotter.outdir = self.get_plot_folder(var_info) + plotter.img_template = self.get_plot_path('diurnalcycle', var_info, + add_ext=False) + plotter.filefmt = self.cfg['output_file_type'] + region_coords = ('shape_id', 'region') + options = { + 'xlabel': '', + 'xlimits': None, + 'suptitle': 'Diurnal cycle', + } + for region_coord in region_coords: + if cube.coords(region_coord): + plotter.multiplot_cube(cube, 'hour', region_coord, **options) + return + plotter.plot_cube(cube, 'hour', **options) + caption = (f"Diurnal cycle of {var_info[n.LONG_NAME]} of " + f"dataset {var_info[n.DATASET]} (project " + f"{var_info[n.PROJECT]}) from {var_info[n.START_YEAR]} to " + f"{var_info[n.END_YEAR]}.") + self.record_plot_provenance( + self.get_plot_path('diurnalcycle', var_info), + var_info, + 'Diurnal cycle', + caption=caption, + ) + def plot_monthly_climatology(self, cube, var_info): """Plot the monthly climatology as a multipanel plot. diff --git a/esmvaltool/diag_scripts/monitor/multi_datasets.py b/esmvaltool/diag_scripts/monitor/multi_datasets.py index a760a312f6..a3f48806ad 100644 --- a/esmvaltool/diag_scripts/monitor/multi_datasets.py +++ b/esmvaltool/diag_scripts/monitor/multi_datasets.py @@ -17,6 +17,9 @@ - Annual cycle (plot type ``annual_cycle``): for each variable separately, all datasets are plotted in one single figure. Input data needs to be 1D with single dimension `month_number`. + - Diurnal cycle (plot type ``diurnal_cycle``): for each variable + separately, all datasets are plotted in one single figure. Input data + needs to be 1D with single dimension `hour`. - Maps (plot type ``map``): for each variable and dataset, an individual map is plotted. If a reference dataset is defined, also include this dataset and a bias plot into the figure. Note that if a reference dataset @@ -65,6 +68,14 @@ :func:`esmvalcore.preprocessor.regrid` for this). Input data needs to be 2D with dimensions `time`, `latitude`/`longitude`. + Benchmarking plots + - annual cycles (``benchmarking_annual_cycle``) + - box plots (``benchmarking_boxplot``) + - diurnal cycles (``benchmarking_diurnal_cycle``) + - maps (``benchmarking_map``) + - time series (``benchmarking_timeseries``) + - zonal mean profiles (``benchmarking_zonal``) + Author ------ Manuel Schlund (DLR, Germany) @@ -149,8 +160,8 @@ the time axis using :class:`matplotlib.dates.DateFormatter`. If ``None``, use the default formatting imposed by the iris plotting function. -Configuration options for plot type ``annual_cycle`` ----------------------------------------------------- +Configuration options for plot type ``annual_cycle`` and ``diurnal_cycle`` +-------------------------------------------------------------------------- gridline_kwargs: dict, optional Optional keyword arguments for grid lines. By default, ``color: lightgrey, alpha: 0.5`` are used. Use ``gridline_kwargs: false`` to not show grid @@ -298,6 +309,7 @@ Fontsize used for ticks, labels and titles. For the latter, use the given fontsize plus 2. Does not affect suptitles. log_y: bool, optional (default: True) Use logarithmic Y-axis.
plot_func: str, optional (default: 'contourf') Plot function used to plot the profiles. Must be a function of @@ -576,6 +588,24 @@ the time axis using :class:`matplotlib.dates.DateFormatter`. If ``None``, use the default formatting imposed by the iris plotting function. +Configuration options for plot type ``benchmarking_annual_cycle`` +----------------------------------------------------------------- + +Configuration options for plot type ``benchmarking_boxplot`` +------------------------------------------------------------ + +Configuration options for plot type ``benchmarking_diurnal_cycle`` +------------------------------------------------------------------ + +Configuration options for plot type ``benchmarking_map`` +-------------------------------------------------------- + +Configuration options for plot type ``benchmarking_timeseries`` +--------------------------------------------------------------- + +Configuration options for plot type ``benchmarking_zonal`` +---------------------------------------------------------- + .. hint:: Extra arguments given to the recipe are ignored, so it is safe to use yaml @@ -600,6 +630,7 @@ import matplotlib.dates as mdates import matplotlib.pyplot as plt import numpy as np +import pandas as pd import seaborn as sns from iris.analysis.cartography import area_weights from iris.coord_categorisation import add_year @@ -636,9 +667,14 @@ def __init__(self, config): # Get default settings self.cfg = deepcopy(self.cfg) + self.cfg.setdefault('add_ancillary_variables', False) + self.cfg.setdefault('add_aux_coords', False) + self.cfg.setdefault('add_cell_measures', False) self.cfg.setdefault('facet_used_for_labels', 'dataset') + self.cfg.setdefault('facets_as_columns', []) self.cfg.setdefault('figure_kwargs', {'constrained_layout': True}) self.cfg.setdefault('group_variables_by', 'short_name') + self.cfg.setdefault('groupby_facet', 'short_name') self.cfg.setdefault('savefig_kwargs', { 'bbox_inches': 'tight', 'dpi': 300, @@ -674,12 +710,19 @@ def __init__(self, config): self.supported_plot_types = [ 'timeseries', 'annual_cycle', + 'diurnal_cycle', 'map', 'zonal_mean_profile', '1d_profile', 'variable_vs_lat', 'hovmoeller_z_vs_time', 'hovmoeller_time_vs_lat_or_lon', + 'benchmarking_annual_cycle', + 'benchmarking_boxplot', + 'benchmarking_diurnal_cycle', + 'benchmarking_map', + 'benchmarking_timeseries', + 'benchmarking_zonal', ] for (plot_type, plot_options) in self.plots.items(): if plot_type not in self.supported_plot_types: @@ -698,12 +741,45 @@ def __init__(self, config): self.plots[plot_type].setdefault('pyplot_kwargs', {}) self.plots[plot_type].setdefault('time_format', None) + elif plot_type == 'benchmarking_timeseries': + self.plots[plot_type].setdefault('annual_mean_kwargs', {}) + self.plots[plot_type].setdefault('gridline_kwargs', {}) + self.plots[plot_type].setdefault('legend_kwargs', {}) + self.plots[plot_type].setdefault('plot_kwargs', {}) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + self.plots[plot_type].setdefault('time_format', None) + elif plot_type == 'annual_cycle': self.plots[plot_type].setdefault('gridline_kwargs', {}) self.plots[plot_type].setdefault('legend_kwargs', {}) self.plots[plot_type].setdefault('plot_kwargs', {}) self.plots[plot_type].setdefault('pyplot_kwargs', {}) + elif plot_type == 'benchmarking_annual_cycle': + self.plots[plot_type].setdefault('gridline_kwargs', {}) + self.plots[plot_type].setdefault('legend_kwargs', {}) + self.plots[plot_type].setdefault('plot_kwargs', {}) + 
self.plots[plot_type].setdefault('pyplot_kwargs', {}) + + elif plot_type == 'diurnal_cycle': + self.plots[plot_type].setdefault('gridline_kwargs', {}) + self.plots[plot_type].setdefault('legend_kwargs', {}) + self.plots[plot_type].setdefault('plot_kwargs', {}) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + + elif plot_type == 'benchmarking_diurnal_cycle': + self.plots[plot_type].setdefault('gridline_kwargs', {}) + self.plots[plot_type].setdefault('legend_kwargs', {}) + self.plots[plot_type].setdefault('plot_kwargs', {}) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + + elif plot_type == 'benchmarking_boxplot': + self.plots[plot_type].setdefault('plot_kwargs', {}) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + self.plots[plot_type].setdefault('var_order', None) + self.plots[plot_type].setdefault('label', []) + self.plots[plot_type].setdefault('fontsize', 10) + elif plot_type == 'map': self.plots[plot_type].setdefault( 'cbar_label', '{short_name} [{units}]') @@ -738,6 +814,33 @@ def __init__(self, config): self.plots[plot_type].setdefault('x_pos_stats_avg', 0.0) self.plots[plot_type].setdefault('x_pos_stats_bias', 0.92) + elif plot_type == 'benchmarking_map': + self.plots[plot_type].setdefault( + 'cbar_label', '{short_name} [{units}]') + self.plots[plot_type].setdefault( + 'cbar_label_bias', '{short_name} [{units}]') + self.plots[plot_type].setdefault( + 'cbar_kwargs', {'orientation': 'horizontal', 'aspect': 30} + ) + self.plots[plot_type].setdefault('cbar_kwargs_bias', {}) + self.plots[plot_type].setdefault('common_cbar', False) + self.plots[plot_type].setdefault('fontsize', 10) + self.plots[plot_type].setdefault('gridline_kwargs', {}) + self.plots[plot_type].setdefault('plot_func', 'contourf') + self.plots[plot_type].setdefault('plot_kwargs', {}) + if 'projection' not in self.plots[plot_type]: + self.plots[plot_type].setdefault('projection', 'Robinson') + self.plots[plot_type].setdefault( + 'projection_kwargs', {'central_longitude': 10} + ) + else: + self.plots[plot_type].setdefault('projection_kwargs', {}) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + self.plots[plot_type].setdefault('rasterize', True) + self.plots[plot_type].setdefault('show_stats', True) + self.plots[plot_type].setdefault('x_pos_stats_avg', 0.0) + self.plots[plot_type].setdefault('x_pos_stats_bias', 0.92) + elif plot_type == 'zonal_mean_profile': self.plots[plot_type].setdefault( 'cbar_label', '{short_name} [{units}]') @@ -768,6 +871,36 @@ def __init__(self, config): self.plots[plot_type].setdefault('x_pos_stats_avg', 0.01) self.plots[plot_type].setdefault('x_pos_stats_bias', 0.7) + elif plot_type == 'benchmarking_zonal': + self.plots[plot_type].setdefault( + 'cbar_label', '{short_name} [{units}]') + self.plots[plot_type].setdefault( + 'cbar_label_bias', 'Δ{short_name} [{units}]') + self.plots[plot_type].setdefault( + 'cbar_kwargs', {'orientation': 'vertical'} + ) + self.plots[plot_type].setdefault('cbar_kwargs_bias', {}) + self.plots[plot_type].setdefault('common_cbar', False) + self.plots[plot_type].setdefault('fontsize', 10) + self.plots[plot_type].setdefault('log_y', True) + self.plots[plot_type].setdefault('plot_func', 'contourf') + self.plots[plot_type].setdefault('plot_kwargs', {}) + self.plots[plot_type].setdefault('plot_kwargs_bias', {}) + self.plots[plot_type]['plot_kwargs_bias'].setdefault( + 'cmap', 'bwr' + ) + self.plots[plot_type]['plot_kwargs_bias'].setdefault( + 'norm', 'centered' + ) + self.plots[plot_type].setdefault('pyplot_kwargs', {}) + 
self.plots[plot_type].setdefault('rasterize', True) + self.plots[plot_type].setdefault('show_stats', True) + self.plots[plot_type].setdefault( + 'show_y_minor_ticklabels', False + ) + self.plots[plot_type].setdefault('x_pos_stats_avg', 0.01) + self.plots[plot_type].setdefault('x_pos_stats_bias', 0.7) + elif plot_type == '1d_profile': self.plots[plot_type].setdefault('aspect_ratio', 1.5) self.plots[plot_type].setdefault('gridline_kwargs', {}) @@ -779,6 +912,7 @@ def __init__(self, config): self.plots[plot_type].setdefault( 'show_y_minor_ticklabels', False ) + elif plot_type == 'variable_vs_lat': self.plots[plot_type].setdefault('gridline_kwargs', {}) self.plots[plot_type].setdefault('legend_kwargs', {}) @@ -1041,6 +1175,20 @@ def _get_map_projection(self): return getattr(ccrs, projection)(**projection_kwargs) + def _get_benchmarking_projection(self): + """Get projection used for benchmarking map plots.""" + plot_type = 'benchmarking_map' + projection = self.plots[plot_type]['projection'] + projection_kwargs = self.plots[plot_type]['projection_kwargs'] + + # Check if desired projection is valid + if not hasattr(ccrs, projection): + raise AttributeError( + f"Got invalid projection '{projection}' for plotting " + f"{plot_type}, expected class of cartopy.crs") + + return getattr(ccrs, projection)(**projection_kwargs) + def _get_plot_func(self, plot_type): """Get plot function.""" plot_func = self.plots[plot_type]['plot_func'] @@ -1079,8 +1227,10 @@ def _get_plot_kwargs(self, plot_type, dataset, bias=False): plot_kwargs[key] = val # Default settings for different plot types - if plot_type in ('timeseries', 'annual_cycle', '1d_profile', - 'variable_vs_lat'): + if plot_type in ('timeseries', 'annual_cycle', + 'benchmarking_annual_cycle', '1d_profile', + 'diurnal_cycle', 'benchmarking_diurnal_cycle', + 'variable_vs_lat', 'benchmarking_timeseries'): plot_kwargs.setdefault('label', label) if plot_kwargs.get('norm') == 'centered': @@ -1779,6 +1929,195 @@ def _plot_hovmoeller_time_vs_lat_or_lon_without_ref(self, plot_func, netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) return (plot_path, {netcdf_path: cube}) + def _plot_benchmarking_map(self, plot_func, dataset, percentile_dataset, + metric): + """Plot benchmarking map plot.""" + plot_type = 'benchmarking_map' + logger.info("Plotting benchmarking map for '%s'", + self._get_label(dataset)) + + # Make sure that the data has the correct dimensions + cube = dataset['cube'] + # dim_coords_dat = self._check_cube_dimensions(cube, plot_type) + + # Create plot with desired settings + with mpl.rc_context(self._get_custom_mpl_rc_params(plot_type)): + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot( + projection=self._get_benchmarking_projection()) + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + plot_kwargs['extend'] = "both" + plot_map = plot_func(cube, **plot_kwargs) + + # apply stippling (dots) to all grid cells that do not exceed + # the upper percentile given by 'percentile_dataset[]' + + mask_cube = self._get_benchmark_mask(cube, percentile_dataset, + metric) + + hatching = plot_func( + mask_cube, + colors='none', + levels=[.5, 1.5], + hatches=['......'], + ) + + # set color for stippling to 'black' (default = 'white') + hatching.set_edgecolor('black') + hatching.set_linewidth(0.) 
+ + + axes.coastlines() + # gridline_kwargs = self._get_gridline_kwargs(plot_type) + # if gridline_kwargs is not False: + # axes.gridlines(**gridline_kwargs) + + # Setup colorbar + fontsize = self.plots[plot_type]['fontsize'] + colorbar = fig.colorbar(plot_map, ax=axes, + **self._get_cbar_kwargs(plot_type)) + colorbar.set_label(self._get_cbar_label(plot_type, dataset), + fontsize=fontsize) + colorbar.ax.tick_params(labelsize=fontsize) + + # Customize plot + axes.set_title(self._get_label(dataset)) + fig.suptitle(f"{dataset['long_name']} ({dataset['start_year']}-" + f"{dataset['end_year']})") + self._process_pyplot_kwargs(plot_type, dataset) + + # Rasterization + if self.plots[plot_type]['rasterize']: + self._set_rasterized([axes]) + + # File paths + plot_path = self.get_plot_path(plot_type, dataset) + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + + return (plot_path, {netcdf_path: cube}) + + def _plot_benchmarking_boxplot(self, df, cubes, variables, datasets): + """Plot benchmarking boxplot.""" + plot_type = 'benchmarking_boxplot' + logger.info("Plotting benchmarking boxplot for '%s'", + self._get_label(datasets[0])) + + # Create plot with desired settings + with mpl.rc_context(self._get_custom_mpl_rc_params(plot_type)): + fig = plt.figure(**self.cfg['figure_kwargs']) + metric = cubes[0].long_name.partition("of")[0] + fig.suptitle(f"{metric}of {self._get_label(datasets[0])}" + f" ({datasets[0]['start_year']} - " + f"{datasets[0]['end_year']})") + + sns.set_style('darkgrid') + + for i, var in enumerate(variables): + axes = plt.subplot(1, len(variables), i+1) + plot_kwargs = self._get_plot_kwargs(plot_type, datasets[i]) + plot_kwargs['axes'] = axes + + plot_boxplot = sns.boxplot(data=df[df['Variable'] == var]) + plot_boxplot.set(xticklabels=[]) + # plot_map = plot_func(cube, **plot_kwargs) + + plt.scatter(0, cubes[i].data, marker='x', s=200, linewidths=2, + color="red", zorder=3) + + plt.xlabel(var) + if cubes[i].units != 1: + plt.ylabel(cubes[i].units) + + # Setup fontsize + # fontsize = self.plots[plot_type]['fontsize'] + + # Customize plot + self._process_pyplot_kwargs(plot_type, datasets[i]) + + # File paths + datasets[0]['variable_group'] = ( + datasets[0]['short_name'].partition("_")[0]) + plot_path = self.get_plot_path(plot_type, datasets[0]) + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + + return (plot_path, {netcdf_path: cubes[0]}) + + def _plot_benchmarking_zonal(self, plot_func, dataset, percentile_dataset, + metric): + """Plot benchmarking zonal mean profile.""" + plot_type = 'benchmarking_zonal' + logger.info("Plotting benchmarking zonal mean profile" + " for '%s'", + self._get_label(dataset)) + + # Make sure that the data has the correct dimensions + cube = dataset['cube'] + + # Create plot with desired settings + with mpl.rc_context(self._get_custom_mpl_rc_params(plot_type)): + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot() + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + plot_kwargs['extend'] = "both" + plot_benchmarking_zonal = plot_func(cube, **plot_kwargs) + + # apply stippling (dots) to all grid cells that do not exceed + # the upper percentile given by 'percentile_dataset[]' + + mask_cube = self._get_benchmark_mask(cube, percentile_dataset, + metric) + hatching = plot_func( + mask_cube, + colors='none', + levels=[.5, 1.5], + hatches=['......'], + ) + + # set color for stippling to 'black' (default = 'white') + hatching.set_edgecolor('black') + 
hatching.set_linewidth(0.) + + # Print statistics if desired + # self._add_stats(plot_type, axes, dim_coords_dat, dataset) + + # Setup colorbar + fontsize = self.plots[plot_type]['fontsize'] + colorbar = fig.colorbar(plot_benchmarking_zonal, ax=axes, + **self._get_cbar_kwargs(plot_type)) + colorbar.set_label(self._get_cbar_label(plot_type, dataset), + fontsize=fontsize) + colorbar.ax.tick_params(labelsize=fontsize) + + # Customize plot + axes.set_title(self._get_label(dataset)) + fig.suptitle(f"{dataset['long_name']} ({dataset['start_year']}-" + f"{dataset['end_year']})") + axes.set_xlabel('latitude [°N]') + z_coord = cube.coord(axis='Z') + axes.set_ylabel(f'{z_coord.long_name} [{z_coord.units}]') + if self.plots[plot_type]['log_y']: + axes.set_yscale('log') + axes.get_yaxis().set_major_formatter( + FormatStrFormatter('%.1f')) + if self.plots[plot_type]['show_y_minor_ticklabels']: + axes.get_yaxis().set_minor_formatter( + FormatStrFormatter('%.1f')) + else: + axes.get_yaxis().set_minor_formatter(NullFormatter()) + self._process_pyplot_kwargs(plot_type, dataset) + + # Rasterization + if self.plots[plot_type]['rasterize']: + self._set_rasterized([axes]) + + # File paths + plot_path = self.get_plot_path(plot_type, dataset) + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + + return (plot_path, {netcdf_path: cube}) + def _process_pyplot_kwargs(self, plot_type, dataset): """Process functions for :mod:`matplotlib.pyplot`.""" pyplot_kwargs = self.plots[plot_type]['pyplot_kwargs'] @@ -1799,7 +2138,15 @@ def _check_cube_dimensions(cube, plot_type): """Check that cube has correct dimensional variables.""" expected_dimensions_dict = { 'annual_cycle': (['month_number'],), + 'benchmarking_boxplot': (['']), + 'diurnal_cycle': (['hour'],), 'map': (['latitude', 'longitude'],), + 'benchmarking_annual_cycle': (['month_number'],), + 'benchmarking_diurnal_cycle': (['hour'],), + 'benchmarking_map': (['latitude', 'longitude'],), + 'benchmarking_timeseries': (['time'],), + 'benchmarking_zonal': (['latitude', 'air_pressure'], + ['latitude', 'altitude']), 'zonal_mean_profile': (['latitude', 'air_pressure'], ['latitude', 'altitude']), 'timeseries': (['time'],), @@ -1862,6 +2209,140 @@ def _get_reference_dataset(self, datasets): return ref_datasets[0] return None + def _get_benchmarking_reference(self, datasets): + """Extract reference dataset for calculation of benchmarking metric.""" + variable = datasets[0][self.cfg['group_variables_by']] + ref_datasets = [d for d in datasets if + d.get('reference_for_metric', False)] + + if len(ref_datasets) == 1: + return ref_datasets[0] + + # try variable attribute "reference_dataset" + for dataset in datasets: + print(dataset.get('reference_dataset')) + print(dataset.get('dataset')) + if dataset.get('reference_dataset') == dataset.get('dataset'): + ref_datasets = dataset + break + if len(ref_datasets) != 1: + raise ValueError( + f"Expected exactly 1 reference dataset for variable " + f"'{variable}', got {len(ref_datasets)}") + return None + + def _get_benchmark_datasets(self, datasets): + """Get dataset to be benchmarked.""" + variable = datasets[0][self.cfg['group_variables_by']] + benchmark_datasets = [d for d in datasets if + d.get('benchmark_dataset', False)] + if len(benchmark_datasets) >= 1: + return benchmark_datasets + + raise ValueError( + f"Expected at least 1 benchmark dataset (with " + f"'benchmark_dataset: true' for variable " + f"'{variable}'), got {len(benchmark_datasets):d}") + + def _get_benchmark_group(self, datasets): + """Get 
datasets for benchmarking.""" + # variable = datasets[0][self.cfg['group_variables_by']] + benchmark_datasets = [d for d in datasets if not + (d.get('benchmark_dataset', False) or + d.get('reference_for_metric', False))] + return benchmark_datasets + + def _get_benchmark_mask(self, cube, percentile_dataset, metric): + """Create mask for benchmarking cube depending on metric.""" + mask_cube = cube.copy() + + idx0 = 0 # index largest percentile + idx1 = len(percentile_dataset) - 1 # index smallest percentile + + if metric == 'bias': + maxabs_perc = np.maximum(np.abs(percentile_dataset[idx0].data), + np.abs(percentile_dataset[idx1].data)) + mask = np.where(np.abs(cube.data) >= maxabs_perc, 0, 1) + elif metric == 'emd': + mask = np.where(cube.data >= percentile_dataset[idx0].data, 0, 1) + elif metric == 'pearsonr': + mask = np.where(cube.data <= percentile_dataset[idx0].data, 0, 1) + elif metric == 'rmse': + mask = np.where(cube.data >= percentile_dataset[idx0].data, 0, 1) + else: + raise ValueError( + f"Could not create benchmarking mask, unknown benchmarking " + f"metric: '{metric}'") + + mask_cube.data = mask + return mask_cube + + def _get_benchmark_metric(self, datasets): + """Get benchmarking metric.""" + short_name = datasets[0].get('short_name') + if 'rmse' in short_name: + metric = 'rmse' + elif 'pearsonr' in short_name: + metric = 'pearsonr' + elif 'emd' in short_name: + metric = 'emd' + else: + metric = 'bias' # default + logger.info( + "Could not determine metric from short_name, " + "assuming benchmarking metric = %s", metric) + return metric + + def _get_benchmark_percentiles(self, datasets): + """Get percentile datasets from multi-model statistics preprocessor.""" + variable = datasets[0][self.cfg['group_variables_by']] + percentiles = [] + for dataset in datasets: + statistics = dataset.get('multi_model_statistics') + if statistics: + if "Percentile" in statistics: + percentiles.append(dataset) + + # *** sort percentiles by size *** + + # get percentiles as integers + iperc = [] + for dataset in percentiles: + stat = dataset.get('multi_model_statistics') + perc = stat.replace('MultiModelPercentile', '') + iperc.append(int(perc)) + + idx = list(range(len(percentiles))) + # sort list of percentile datasets by percentile with highest + # percentile first (descending order) + zipped_pairs = zip(iperc, idx) + zval = [x for _, x in sorted(zipped_pairs, reverse=True)] + perc_sorted = [percentiles[i] for i in zval] + percentiles = perc_sorted + + # get number of percentiles expected depending on benchmarking metric + + metric = self._get_benchmark_metric(datasets) + + if metric == 'bias': + numperc = 2 + elif metric == 'rmse': + numperc = 1 + elif metric == 'pearsonr': + numperc = 1 + elif metric == 'emd': + numperc = 1 + else: + raise ValueError(f"Unknown benchmarking metric: '{metric}'.") + + if len(percentiles) >= numperc: + return percentiles + + raise ValueError( + f"Expected at least '{numperc}' percentile datasets (created " + f"'with multi-model statistics preprocessor for variable " + f"'{variable}'), got {len(percentiles):d}") + def create_timeseries_plot(self, datasets): """Create time series plot.""" plot_type = 'timeseries' @@ -1952,9 +2433,9 @@ def create_timeseries_plot(self, datasets): provenance_logger.log(plot_path, provenance_record) provenance_logger.log(netcdf_path, provenance_record) - def create_annual_cycle_plot(self, datasets): - """Create annual cycle plot.""" - plot_type = 'annual_cycle' + def create_benchmarking_timeseries(self, datasets): + """Create time 
series benchmarking plot.""" + plot_type = 'benchmarking_timeseries' if plot_type not in self.plots: return @@ -1962,27 +2443,221 @@ def create_annual_cycle_plot(self, datasets): raise ValueError(f"No input data to plot '{plot_type}' given") logger.info("Plotting %s", plot_type) + + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + # Get percentiles from multi-model statistics + percentile_dataset = self._get_benchmark_percentiles(datasets) + fig = plt.figure(**self.cfg['figure_kwargs']) axes = fig.add_subplot() + # load data + + percentile_data = [] + + for dataset_to_load in percentile_dataset: + filename = dataset_to_load['filename'] + logger.info("Loading %s", filename) + cube = iris.load_cube(filename) + percentile_data.append(cube) + # Plot all datasets in one single figure ancestors = [] cubes = {} - for dataset in datasets: - ancestors.append(dataset['filename']) - cube = dataset['cube'] - cubes[self._get_label(dataset)] = cube - self._check_cube_dimensions(cube, plot_type) - # Plot annual cycle + for dataset in plot_datasets: plot_kwargs = self._get_plot_kwargs(plot_type, dataset) - plot_kwargs['axes'] = axes - iris.plot.plot(cube, **plot_kwargs) + iris.plot.plot(dataset['cube'], **plot_kwargs) + + yval2 = percentile_dataset[0]['cube'] + if len(percentile_dataset) > 1: + idx = len(percentile_dataset) - 1 + yval1 = percentile_dataset[idx]['cube'] + else: + yval1 = yval2.copy() + ymin, __ = axes.get_ylim() + yval1.data = np.full(len(yval1.data), ymin) + + dataset = plot_datasets[0] + iris.plot.fill_between(dataset['cube'].coord('time'), yval1, yval2, + facecolor='lightblue', edgecolor='lightblue', + linewidth=3, zorder=1, alpha=0.8) # Default plot appearance multi_dataset_facets = self._get_multi_dataset_facets(datasets) axes.set_title(multi_dataset_facets['long_name']) - axes.set_xlabel('Month') + axes.set_xlabel('time') + # apply time formatting + if self.plots[plot_type]['time_format'] is not None: + axes.get_xaxis().set_major_formatter( + mdates.DateFormatter(self.plots[plot_type]['time_format'])) + axes.set_ylabel( + f"{multi_dataset_facets[self.cfg['group_variables_by']]} " + f"[{multi_dataset_facets['units']}]" + ) + gridline_kwargs = self._get_gridline_kwargs(plot_type) + if gridline_kwargs is not False: + axes.grid(**gridline_kwargs) + + # Legend + legend_kwargs = self.plots[plot_type]['legend_kwargs'] + if legend_kwargs is not False: + axes.legend(**legend_kwargs) + + # Customize plot appearance + self._process_pyplot_kwargs(plot_type, multi_dataset_facets) + + # Save plot + plot_path = self.get_plot_path(plot_type, multi_dataset_facets) + fig.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDF file + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + var_attrs = { + n: datasets[0][n] for n in ('short_name', 'long_name', 'units') + } + cubes[self._get_label(dataset)] = dataset['cube'] + io.save_1d_data(cubes, netcdf_path, 'time', var_attrs) + + # Provenance tracking + caption = (f"Time series of {multi_dataset_facets['long_name']} for " + f"various datasets.") + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['line'], + 'long_names': [var_attrs['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + provenance_logger.log(netcdf_path, provenance_record) + + def create_annual_cycle_plot(self, datasets): + 
"""Create annual cycle plot.""" + plot_type = 'annual_cycle' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + logger.info("Plotting %s", plot_type) + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot() + + # Plot all datasets in one single figure + ancestors = [] + cubes = {} + for dataset in datasets: + ancestors.append(dataset['filename']) + cube = dataset['cube'] + cubes[self._get_label(dataset)] = cube + self._check_cube_dimensions(cube, plot_type) + + # Plot annual cycle + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + iris.plot.plot(cube, **plot_kwargs) + + # Default plot appearance + multi_dataset_facets = self._get_multi_dataset_facets(datasets) + axes.set_title(multi_dataset_facets['long_name']) + axes.set_xlabel('Month') + axes.set_ylabel( + f"{multi_dataset_facets[self.cfg['group_variables_by']]} " + f"[{multi_dataset_facets['units']}]" + ) + axes.set_xticks(range(1, 13), [str(m) for m in range(1, 13)]) + gridline_kwargs = self._get_gridline_kwargs(plot_type) + if gridline_kwargs is not False: + axes.grid(**gridline_kwargs) + + # Legend + legend_kwargs = self.plots[plot_type]['legend_kwargs'] + if legend_kwargs is not False: + axes.legend(**legend_kwargs) + + # Customize plot appearance + self._process_pyplot_kwargs(plot_type, multi_dataset_facets) + + # Save plot + plot_path = self.get_plot_path(plot_type, multi_dataset_facets) + fig.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDF file + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + var_attrs = { + n: datasets[0][n] for n in ('short_name', 'long_name', 'units') + } + io.save_1d_data(cubes, netcdf_path, 'month_number', var_attrs) + + # Provenance tracking + caption = (f"Annual cycle of {multi_dataset_facets['long_name']} for " + f"various datasets.") + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['seas'], + 'long_names': [var_attrs['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + provenance_logger.log(netcdf_path, provenance_record) + + def create_benchmarking_annual(self, datasets): + """Create benchmarking annual cycle plot.""" + plot_type = 'benchmarking_annual_cycle' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + logger.info("Plotting %s", plot_type) + + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + # Get percentiles from multi-model statistics + percentile_dataset = self._get_benchmark_percentiles(datasets) + + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot() + + # Plot all datasets in one single figure + ancestors = [] + cubes = {} + + # Plot annual cycle(s) + for dataset in plot_datasets: + cube = dataset['cube'] + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + iris.plot.plot(cube, **plot_kwargs) + + yval2 = percentile_dataset[0]['cube'] + if len(percentile_dataset) > 1: + idx = len(percentile_dataset) - 1 + yval1 = percentile_dataset[idx]['cube'] + else: + yval1 = yval2.copy() + ymin, __ = axes.get_ylim() + yval1.data = np.full(len(yval1.data), ymin) + + iris.plot.fill_between(cube.coord('month_number'), yval1, yval2, + 
facecolor='lightblue', + linewidth=0, zorder=1, alpha=0.8) + + # Default plot appearance + multi_dataset_facets = self._get_multi_dataset_facets(datasets) + axes.set_title(multi_dataset_facets['long_name']) + axes.set_xlabel('Month') axes.set_ylabel( f"{multi_dataset_facets[self.cfg['group_variables_by']]} " f"[{multi_dataset_facets['units']}]" @@ -2011,6 +2686,8 @@ def create_annual_cycle_plot(self, datasets): var_attrs = { n: datasets[0][n] for n in ('short_name', 'long_name', 'units') } + dataset = plot_datasets[0] + cubes[self._get_label(dataset)] = dataset['cube'] io.save_1d_data(cubes, netcdf_path, 'month_number', var_attrs) # Provenance tracking @@ -2027,6 +2704,268 @@ def create_annual_cycle_plot(self, datasets): provenance_logger.log(plot_path, provenance_record) provenance_logger.log(netcdf_path, provenance_record) + def create_diurnal_cycle_plot(self, datasets): + """Create diurnal cycle plot.""" + plot_type = 'diurnal_cycle' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + logger.info("Plotting %s", plot_type) + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot() + + # Plot all datasets in one single figure + ancestors = [] + cubes = {} + for dataset in datasets: + ancestors.append(dataset['filename']) + cube = dataset['cube'] + cubes[self._get_label(dataset)] = cube + self._check_cube_dimensions(cube, plot_type) + + # Plot diurnal cycle + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + iris.plot.plot(cube, **plot_kwargs) + + # Default plot appearance + multi_dataset_facets = self._get_multi_dataset_facets(datasets) + axes.set_title(multi_dataset_facets['long_name']) + axes.set_xlabel('Hour') + axes.set_ylabel( + f"{multi_dataset_facets[self.cfg['group_variables_by']]} " + f"[{multi_dataset_facets['units']}]" + ) + axes.set_xticks(range(0, 24), minor=True) + axes.set_xticks(range(0, 24, 3), [str(m) for m in range(0, 24, 3)]) + gridline_kwargs = self._get_gridline_kwargs(plot_type) + if gridline_kwargs is not False: + axes.grid(**gridline_kwargs) + + # Legend + legend_kwargs = self.plots[plot_type]['legend_kwargs'] + if legend_kwargs is not False: + axes.legend(**legend_kwargs) + + # Customize plot appearance + self._process_pyplot_kwargs(plot_type, multi_dataset_facets) + + # Save plot + plot_path = self.get_plot_path(plot_type, multi_dataset_facets) + fig.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDF file + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + var_attrs = { + n: datasets[0][n] for n in ('short_name', 'long_name', 'units') + } + io.save_1d_data(cubes, netcdf_path, 'hour', var_attrs) + + # Provenance tracking + caption = (f"Diurnal cycle of {multi_dataset_facets['long_name']} for " + f"various datasets.") + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['seas'], + 'long_names': [var_attrs['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + provenance_logger.log(netcdf_path, provenance_record) + + def create_benchmarking_diurnal(self, datasets): + """Create benchmarking diurnal cycle plot.""" + plot_type = 'benchmarking_diurnal_cycle' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + 
logger.info("Plotting %s", plot_type) + + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + # Get percentiles from multi-model statistics + percentile_dataset = self._get_benchmark_percentiles(datasets) + + fig = plt.figure(**self.cfg['figure_kwargs']) + axes = fig.add_subplot() + + # Plot all datasets in one single figure + ancestors = [] + cubes = {} + + # Plot diurnal cycle(s) + for dataset in plot_datasets: + cube = dataset['cube'] + plot_kwargs = self._get_plot_kwargs(plot_type, dataset) + plot_kwargs['axes'] = axes + iris.plot.plot(cube, **plot_kwargs) + + yval2 = percentile_dataset[0]['cube'] + if len(percentile_dataset) > 1: + idx = len(percentile_dataset) - 1 + yval1 = percentile_dataset[idx]['cube'] + else: + yval1 = yval2.copy() + ymin, __ = axes.get_ylim() + yval1.data = np.full(len(yval1.data), ymin) + + iris.plot.fill_between(cube.coord('hour'), yval1, yval2, + facecolor='lightblue', + linewidth=0, + zorder=1, alpha=0.8) + + # Default plot appearance + multi_dataset_facets = self._get_multi_dataset_facets(datasets) + axes.set_title(multi_dataset_facets['long_name']) + axes.set_xlabel('Hour') + axes.set_ylabel( + f"{multi_dataset_facets[self.cfg['group_variables_by']]} " + f"[{multi_dataset_facets['units']}]" + ) + axes.set_xticks(range(0, 24), minor=True) + axes.set_xticks(range(0, 24, 3), [str(m) for m in range(0, 24, 3)]) + gridline_kwargs = self._get_gridline_kwargs(plot_type) + if gridline_kwargs is not False: + axes.grid(**gridline_kwargs) + + # Legend + legend_kwargs = self.plots[plot_type]['legend_kwargs'] + if legend_kwargs is not False: + axes.legend(**legend_kwargs) + + # Customize plot appearance + self._process_pyplot_kwargs(plot_type, multi_dataset_facets) + + # Save plot + plot_path = self.get_plot_path(plot_type, multi_dataset_facets) + fig.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDF file + netcdf_path = get_diagnostic_filename(Path(plot_path).stem, self.cfg) + var_attrs = { + n: datasets[0][n] for n in ('short_name', 'long_name', 'units') + } + dataset = plot_datasets[0] + cubes[self._get_label(dataset)] = dataset['cube'] + io.save_1d_data(cubes, netcdf_path, 'hour', var_attrs) + + # Provenance tracking + caption = (f"Diurnal cycle of {multi_dataset_facets['long_name']} for " + f"various datasets.") + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['seas'], + 'long_names': [var_attrs['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + provenance_logger.log(netcdf_path, provenance_record) + + def create_benchmarking_boxplot(self): + """Create boxplot.""" + plot_type = 'benchmarking_boxplot' + if plot_type not in self.plots: + return + + dframe = pd.DataFrame(columns=['Variable', 'Dataset', 'Value']) + ifile = 0 + + cubes = iris.cube.CubeList() + benchmark_datasets = [] + variables = [] + + for (var_key, datasets) in self.grouped_input_data.items(): + logger.info("Processing variable %s", var_key) + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + benchmark_dataset = plot_datasets[0] + + logger.info("Plotting %s for dataset %s", + plot_type, benchmark_dataset['dataset']) + + # Get datasets for benchmarking + benchmark_group = self._get_benchmark_group(datasets) + 
logger.info("Benchmarking group of %i datasets.", + len(benchmark_group)) + + ancestors = [benchmark_dataset['filename']] + for dataset in benchmark_group: + ancestors.append(dataset['filename']) + + for dataset in benchmark_group: + dataset_name = dataset['dataset'] + cube = iris.load_cube(dataset['filename']) + dframe.loc[ifile] = [var_key, dataset_name, cube.data] + ifile = ifile + 1 + + dframe['Value'] = dframe['Value'].astype(str).astype(float) + + cubes.append(benchmark_dataset['cube']) + benchmark_datasets.append(benchmark_dataset) + variables.append(var_key) + + # order of variables + if self.plots[plot_type]['var_order']: + var_order = self.plots[plot_type]['var_order'] + if set(variables) == set(var_order): + ind = [variables.index(var_order[i]) + for i in range(len(variables))] + cubes = iris.cube.CubeList([cubes[i] for i in ind]) + benchmark_datasets = [benchmark_datasets[i] for i in ind] + variables = var_order + else: + raise ValueError("List of ordered variables do not agree with" + " processed variables") + + (plot_path, netcdf_paths) = ( + self._plot_benchmarking_boxplot(dframe, cubes, variables, + benchmark_datasets) + ) + + # Save plot + plt.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDF file + for (netcdf_path, cube) in netcdf_paths.items(): + io.iris_save(cube, netcdf_path) + + # Provenance tracking + caption = ( + "Boxplot." + # f"Boxplot of {dataset['long_name']} of dataset " + # f"{dataset['dataset']} (project {dataset['project']}) " + # f"from {dataset['start_year']} to {dataset['end_year']}." + ) + provenance_record = { + 'ancestors': ancestors, + 'authors': ['bock_lisa', 'schlund_manuel'], + 'caption': caption, + 'plot_types': ['box'], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + provenance_logger.log(netcdf_path, provenance_record) + def create_map_plot(self, datasets): """Create map plot.""" plot_type = 'map' @@ -2100,6 +3039,78 @@ def create_map_plot(self, datasets): for netcdf_path in netcdf_paths: provenance_logger.log(netcdf_path, provenance_record) + def create_benchmarking_map_plot(self, datasets): + """Create benchmarking map plot.""" + plot_type = 'benchmarking_map' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + # Get reference dataset + ref_dataset = self._get_benchmarking_reference(datasets) + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + # Get percentiles from multi-model statistics + percentile_dataset = self._get_benchmark_percentiles(datasets) + # Get benchmarking metric + metric = self._get_benchmark_metric(datasets) + + # Get plot function + plot_func = self._get_plot_func(plot_type) + + # load data + + percentile_data = [] + + for dataset_to_load in percentile_dataset: + filename = dataset_to_load['filename'] + logger.info("Loading %s", filename) + cube = iris.load_cube(filename) + percentile_data.append(cube) + + for dataset in plot_datasets: + ancestors = [dataset['filename']] + (plot_path, netcdf_paths) = ( + self._plot_benchmarking_map(plot_func, dataset, + percentile_data, metric) + ) + caption = ( + f"Map plot of {dataset['long_name']} of dataset " + f"{dataset['dataset']} (project {dataset['project']}) " + f"from {dataset['start_year']} to {dataset['end_year']}." 
+ ) + ancestors.append(ref_dataset['filename']) + + # If statistics are shown add a brief description to the caption + if self.plots[plot_type]['show_stats']: + caption += ( + " The number in the top left corner corresponds to the " + "spatial mean (weighted by grid cell areas).") + + # Save plot + plt.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDFs + for (netcdf_path, cube) in netcdf_paths.items(): + io.iris_save(cube, netcdf_path) + + # Provenance tracking + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['map'], + 'long_names': [dataset['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + for netcdf_path in netcdf_paths: + provenance_logger.log(netcdf_path, provenance_record) + def create_zonal_mean_profile_plot(self, datasets): """Create zonal mean profile plot.""" plot_type = 'zonal_mean_profile' @@ -2175,6 +3186,82 @@ def create_zonal_mean_profile_plot(self, datasets): for netcdf_path in netcdf_paths: provenance_logger.log(netcdf_path, provenance_record) + def create_benchmarking_zonal_plot(self, datasets): + """Create benchmarking zonal mean profile plot.""" + plot_type = 'benchmarking_zonal' + if plot_type not in self.plots: + return + + if not datasets: + raise ValueError(f"No input data to plot '{plot_type}' given") + + # Get reference dataset + # ref_dataset = self._get_benchmarking_reference(datasets) + # Get dataset to be benchmarked + plot_datasets = self._get_benchmark_datasets(datasets) + # Get percentiles from multi-model statistics + percentile_dataset = self._get_benchmark_percentiles(datasets) + # Get benchmarking metric + metric = self._get_benchmark_metric(datasets) + + # Get plot function + plot_func = self._get_plot_func(plot_type) + + # Create a single plot for each dataset (incl. reference dataset if + # given) + + # load data + + percentile_data = [] + + for dataset_to_load in percentile_dataset: + filename = dataset_to_load['filename'] + logger.info("Loading %s", filename) + cube = iris.load_cube(filename) + percentile_data.append(cube) + + for dataset in plot_datasets: + (plot_path, netcdf_paths) = ( + self._plot_benchmarking_zonal(plot_func, dataset, + percentile_data, metric) + ) + ancestors = [dataset['filename']] + + caption = ( + f"Zonal mean profile of {dataset['long_name']} of dataset " + f"{dataset['dataset']} (project {dataset['project']}) from " + f"{dataset['start_year']} to {dataset['end_year']}." 
+ ) + # ancestors.append(ref_dataset['filename']) + + # If statistics are shown add a brief description to the caption + # if self.plots[plot_type]['show_stats']: + # caption += ( + # " The number in the top left corner corresponds to the " + # "spatial mean (weighted by grid cell areas).") + + # Save plot + plt.savefig(plot_path, **self.cfg['savefig_kwargs']) + logger.info("Wrote %s", plot_path) + plt.close() + + # Save netCDFs + for (netcdf_path, cube) in netcdf_paths.items(): + io.iris_save(cube, netcdf_path) + + # Provenance tracking + provenance_record = { + 'ancestors': ancestors, + 'authors': ['schlund_manuel'], + 'caption': caption, + 'plot_types': ['vert'], + 'long_names': [dataset['long_name']], + } + with ProvenanceLogger(self.cfg) as provenance_logger: + provenance_logger.log(plot_path, provenance_record) + for netcdf_path in netcdf_paths: + provenance_logger.log(netcdf_path, provenance_record) + def create_1d_profile_plot(self, datasets): """Create 1D profile plot.""" plot_type = '1d_profile' @@ -2497,10 +3584,17 @@ def create_hovmoeller_time_vs_lat_or_lon_plot(self, datasets): def compute(self): """Plot preprocessed data.""" + self.create_benchmarking_boxplot() for (var_key, datasets) in self.grouped_input_data.items(): logger.info("Processing variable %s", var_key) self.create_timeseries_plot(datasets) self.create_annual_cycle_plot(datasets) + self.create_diurnal_cycle_plot(datasets) + self.create_benchmarking_annual(datasets) + self.create_benchmarking_diurnal(datasets) + self.create_benchmarking_map_plot(datasets) + self.create_benchmarking_timeseries(datasets) + self.create_benchmarking_zonal_plot(datasets) self.create_map_plot(datasets) self.create_zonal_mean_profile_plot(datasets) self.create_1d_profile_plot(datasets) diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_annual_cycle.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_annual_cycle.yml new file mode 100644 index 0000000000..32d6b3aaeb --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_annual_cycle.yml @@ -0,0 +1,197 @@ +# ESMValTool +--- +documentation: + title: Benchmarking of a single model. + description: > + Benchmarking: annual cycle. 
+ authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + maintainer: + - lauer_axel + references: + - lauer24gmd + projects: + - dlrmabak + + +# Note: the following models are just examples +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: BCC-ESM1, grid: gn} + - {dataset: CAMS-CSM1-0, grid: gn} + - {dataset: CanESM5, grid: gn} + - {dataset: CanESM5-CanOE, grid: gn, ensemble: r1i1p2f1} + - {dataset: CESM2, grid: gn} + - {dataset: CESM2-FV2, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM-FV2, grid: gn, institute: NCAR} + - {dataset: CIESM} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-CM6-1-HR, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: E3SM-1-0} + - {dataset: E3SM-1-1, institute: E3SM-Project} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-f3-L} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: FIO-ESM-2-0, grid: gn} + - {dataset: GFDL-ESM4, grid: gr1} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: GISS-E2-1-H, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: INM-CM4-8, grid: gr1} + - {dataset: INM-CM5-0, grid: gr1} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MCM-UA-1-0, grid: gn} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: NorESM2-LM, grid: gn, institute: NCC} + - {dataset: NorESM2-MM, grid: gn, institute: NCC} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, alias: MIROC6, benchmark_dataset: true} + +preprocessors: + + pp_tas: + regrid: + target_grid: 2x2 + scheme: linear + climate_statistics: + period: month + area_statistics: + operator: mean + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + pp_tas_metric: + custom_order: true + regrid_time: + calendar: standard + regrid: + target_grid: 2x2 + scheme: linear + climate_statistics: + period: month + distance_metric: + metric: rmse + coords: [longitude, latitude] + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + +diagnostics: + + annual_cycle: + description: Create "classical" annual cycle plot including a reference dataset. 
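The ``multi_model_statistics`` step in the preprocessors above is what produces the ``MultiModelPercentile10`` and ``MultiModelPercentile90`` datasets referenced in the ``plot_kwargs`` of the diagnostics below. Conceptually it is a percentile taken across the model dimension at every time step; a minimal numpy sketch with synthetic annual cycles (an illustration, not the ESMValCore implementation):

    import numpy as np

    # Synthetic annual cycles for an ensemble of models: shape (n_models, 12 months).
    rng = np.random.default_rng(0)
    months = np.arange(12)
    ensemble = 285.0 + 10.0 * np.sin(2 * np.pi * months / 12) + rng.normal(0.0, 1.0, (40, 12))

    # Per-month percentiles across the model dimension; these correspond to the
    # MultiModelPercentile10/90 curves drawn as dashed gray lines in the plots.
    p10 = np.percentile(ensemble, 10, axis=0)
    p90 = np.percentile(ensemble, 90, axis=0)
    print(p10.round(1), p90.round(1))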
+ variables: + tas: + timerange: '2000/2004' + preprocessor: pp_tas + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: HadCRUT5 + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, version: 5.0.1.0-analysis, tier: 2, alias: HadCRUT5} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + facet_used_for_labels: alias + plots: + annual_cycle: + annual_mean_kwargs: False + plot_kwargs: + 'MIROC6': + color: red + label: '{alias}' + linestyle: '-' + linewidth: 2 + zorder: 4 + HadCRUT5: + color: black + label: '{dataset}' + linestyle: '-' + linewidth: 2 + zorder: 3 + MultiModelPercentile10: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + MultiModelPercentile90: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + default: + color: lightgray + label: null + linestyle: '-' + linewidth: 1 + zorder: 1 + + benchmarking_annual_cycle: + description: Create "benchmarking" annual cycle plot. + variables: + tas: + timerange: '2000/2004' + preprocessor: pp_tas_metric + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: HadCRUT5 + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + facet_used_for_labels: alias + plots: + benchmarking_annual_cycle: + plot_kwargs: + annual_mean_kwargs: False + plot_kwargs: + 'MIROC6': + color: red + label: '{alias}' + linestyle: '-' + linewidth: 1.5 + zorder: 3 diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_boxplots.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_boxplots.yml new file mode 100644 index 0000000000..43cfdb05fe --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_boxplots.yml @@ -0,0 +1,437 @@ +# ESMValTool +--- +documentation: + title: Benchmarkig of a single model. + + description: > + Benchmarking: Box plots. 
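The boxplot recipe below collects one scalar metric value per model and variable (computed by the ``distance_metric`` preprocessor), summarizes the CMIP6 ensemble as a box and marks the dataset to be benchmarked against that distribution. A rough pandas/matplotlib sketch of that idea with made-up values and the hypothetical label MIROC6; the actual plotting is done by ``_plot_benchmarking_boxplot`` in the diagnostic:

    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(8)
    variables = ['tas_land', 'pr', 'rlut']

    # Made-up RMSE values: one scalar per (variable, model) pair.
    records = [
        {'Variable': var, 'Dataset': f'model_{i:02d}', 'Value': abs(rng.normal(1.0, 0.3))}
        for var in variables for i in range(40)
    ]
    dframe = pd.DataFrame(records)
    benchmark = {'tas_land': 0.9, 'pr': 1.6, 'rlut': 1.1}  # made-up values for MIROC6

    fig, axes = plt.subplots()
    data = [dframe.loc[dframe['Variable'] == var, 'Value'] for var in variables]
    axes.boxplot(data)
    axes.set_xticks(range(1, len(variables) + 1), variables)
    # Overlay the benchmarked dataset as a marker on top of each box.
    axes.plot(range(1, len(variables) + 1), [benchmark[v] for v in variables],
              'rx', markersize=10, label='MIROC6')
    axes.set_ylabel('RMSE')
    axes.legend()
    fig.savefig('boxplot_sketch.png')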
+ + authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + + maintainer: + - bock_lisa + + references: + - lauer24gmd + + projects: + - dlrmabak + + +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: BCC-ESM1, grid: gn} + - {dataset: CAMS-CSM1-0, grid: gn} + - {dataset: CanESM5, grid: gn} + - {dataset: CanESM5-CanOE, grid: gn, ensemble: r1i1p2f1} + - {dataset: CESM2, grid: gn} + - {dataset: CESM2-FV2, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM-FV2, grid: gn, institute: NCAR} + - {dataset: CIESM} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-CM6-1-HR, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: E3SM-1-0} + - {dataset: E3SM-1-1, institute: E3SM-Project} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-f3-L} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: GFDL-ESM4, grid: gr1} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: GISS-E2-1-H, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: INM-CM4-8, grid: gr1} + - {dataset: INM-CM5-0, grid: gr1} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: NorESM2-LM, grid: gn, institute: NCC} + - {dataset: NorESM2-MM, grid: gn, institute: NCC} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, benchmark_dataset: true} + + +VAR_SETTINGS: &var_settings + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + timerange: '2000/2004' + + +preprocessors: + + rmse: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_rmse + coords: [latitude, longitude] + + rmse_pr: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + convert_units: + units: mm day-1 + distance_metric: + metric: weighted_rmse + coords: [latitude, longitude] + + rmse_sst: + custom_order: true + climate_statistics: + operator: mean + mask_below_threshold: + threshold: 273.15 + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_rmse + coords: [latitude, longitude] + + rmse_land: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + mask_landsea: + mask_out: sea + distance_metric: + metric: weighted_rmse + coords: [latitude, longitude] + + pearsonr: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_pearsonr + coords: [latitude, longitude] + + pearsonr_pr: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + convert_units: + units: mm day-1 + distance_metric: + metric: weighted_pearsonr + coords: [latitude, longitude] + + pearsonr_sst: + custom_order: true + climate_statistics: + 
operator: mean + mask_below_threshold: + threshold: 273.15 + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_pearsonr + coords: [latitude, longitude] + + pearsonr_land: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + mask_landsea: + mask_out: sea + distance_metric: + metric: weighted_pearsonr + coords: [latitude, longitude] + + emd: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_emd + coords: [latitude, longitude] + + emd_pr: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + convert_units: + units: mm day-1 + distance_metric: + metric: weighted_emd + coords: [latitude, longitude] + + emd_sst: + custom_order: true + climate_statistics: + operator: mean + mask_below_threshold: + threshold: 273.15 + regrid: + target_grid: 2x2 + scheme: nearest + distance_metric: + metric: weighted_emd + coords: [latitude, longitude] + + emd_land: + custom_order: true + climate_statistics: + operator: mean + regrid: + target_grid: 2x2 + scheme: nearest + mask_landsea: + mask_out: sea + distance_metric: + metric: weighted_emd + coords: [latitude, longitude] + + +diagnostics: + + plot_boxplots_rmse: + description: Plot boxplots for different variables. + variables: + tas_land: + <<: *var_settings + preprocessor: rmse_land + short_name: tas + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, + version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + lwcre: + <<: *var_settings + preprocessor: rmse + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + pr: + <<: *var_settings + preprocessor: rmse_pr + additional_datasets: + - {dataset: GPCP-SG, project: OBS, type: atmos, version: 2.3, tier: 2, + reference_for_metric: true} + psl: + <<: *var_settings + preprocessor: rmse + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: v1, + tier: 3, reference_for_metric: true} + rlut: + <<: *var_settings + preprocessor: rmse + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + rsut: + <<: *var_settings + preprocessor: rmse + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + swcre: + <<: *var_settings + preprocessor: rmse + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + sst: + <<: *var_settings + preprocessor: rmse_sst + short_name: ts + additional_datasets: + - {dataset: HadISST, project: OBS, type: reanaly, version: 1, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_boxplot: + var_order: ['tas_land', 'sst', 'pr', 'psl', 'rsut', 'rlut', 'swcre', 'lwcre'] + + + plot_boxplots_pearsonr: + description: Plot boxplots for different variables. 
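The ``weighted_rmse`` and ``weighted_pearsonr`` metrics used by the preprocessors above reduce each model-versus-reference comparison to a single number, weighting every grid cell by its area (roughly proportional to the cosine of latitude on a regular grid). A self-contained numpy sketch of such weighted metrics with synthetic fields; this illustrates the idea only and is not the ESMValCore code:

    import numpy as np

    def weighted_rmse(model, ref, lat):
        """Area-weighted RMSE between two (lat, lon) fields (weights ~ cos(lat))."""
        weights = np.cos(np.deg2rad(lat))[:, np.newaxis] * np.ones_like(model)
        weights /= weights.sum()
        return np.sqrt(np.sum(weights * (model - ref) ** 2))

    def weighted_pearsonr(model, ref, lat):
        """Area-weighted Pearson correlation between two (lat, lon) fields."""
        weights = np.cos(np.deg2rad(lat))[:, np.newaxis] * np.ones_like(model)
        weights /= weights.sum()
        m_anom = model - np.sum(weights * model)
        r_anom = ref - np.sum(weights * ref)
        cov = np.sum(weights * m_anom * r_anom)
        return cov / np.sqrt(np.sum(weights * m_anom**2) * np.sum(weights * r_anom**2))

    lat = np.linspace(-89.0, 89.0, 90)
    n_lon = 180
    rng = np.random.default_rng(1)
    # Synthetic reference temperature field (warm tropics, cold poles) and a noisy "model".
    ref = np.tile((288.0 - 40.0 * np.sin(np.deg2rad(lat)) ** 2)[:, np.newaxis], (1, n_lon))
    model = ref + rng.normal(0.0, 1.5, ref.shape)
    print(weighted_rmse(model, ref, lat), weighted_pearsonr(model, ref, lat))

Lower values are better for RMSE and EMD, whereas higher values are better for the Pearson coefficient.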
+ variables: + tas_land: + <<: *var_settings + short_name: tas + preprocessor: pearsonr_land + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, + version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + lwcre: + <<: *var_settings + preprocessor: pearsonr + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + pr: + <<: *var_settings + preprocessor: pearsonr_pr + additional_datasets: + - {dataset: GPCP-SG, project: OBS, type: atmos, version: 2.3, tier: 2, + reference_for_metric: true} + psl: + <<: *var_settings + preprocessor: pearsonr + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: v1, + tier: 3, reference_for_metric: true} + rlut: + <<: *var_settings + preprocessor: pearsonr + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + rsut: + <<: *var_settings + preprocessor: pearsonr + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + swcre: + <<: *var_settings + preprocessor: pearsonr + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + sst: + <<: *var_settings + preprocessor: pearsonr_sst + short_name: ts + additional_datasets: + - {dataset: HadISST, project: OBS, type: reanaly, version: 1, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_boxplot: + var_order: ['tas_land', 'sst', 'pr', 'psl', 'rsut', 'rlut', 'swcre', 'lwcre'] + + + plot_boxplots_emd: + description: Plot boxplots for different variables. 
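``weighted_emd`` compares the statistical distributions of model and reference values rather than their point-by-point differences. Below is a one-dimensional illustration using SciPy's earth mover's (Wasserstein) distance on synthetic, flattened fields; the actual preprocessor works on the (area-weighted) grid-cell values of each climatological field:

    import numpy as np
    from scipy.stats import wasserstein_distance

    rng = np.random.default_rng(2)
    lat = np.linspace(-89.0, 89.0, 90)
    weights = np.repeat(np.cos(np.deg2rad(lat)), 180)  # crude area weights, one per grid cell

    # Synthetic reference field (flattened) and a model field with a small offset.
    ref_values = rng.normal(288.0, 12.0, lat.size * 180)
    model_values = ref_values + rng.normal(1.0, 2.0, ref_values.size)

    emd = wasserstein_distance(model_values, ref_values, u_weights=weights, v_weights=weights)
    print(f"EMD between model and reference distributions: {emd:.2f}")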
+ variables: + tas_land: + <<: *var_settings + preprocessor: emd_land + short_name: tas + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, + version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + lwcre: + <<: *var_settings + preprocessor: emd + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + pr: + <<: *var_settings + preprocessor: emd_pr + additional_datasets: + - {dataset: GPCP-SG, project: OBS, type: atmos, version: 2.3, tier: 2, + reference_for_metric: true} + psl: + <<: *var_settings + preprocessor: emd + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: v1, + tier: 3, reference_for_metric: true} + rlut: + <<: *var_settings + preprocessor: emd + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + rsut: + <<: *var_settings + preprocessor: emd + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + swcre: + <<: *var_settings + preprocessor: emd + derive: true + force_derivation: true + channel: Amon + additional_datasets: + - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.2, + tier: 2, start_year: 2001, end_year: 2020, reference_for_metric: true} + sst: + <<: *var_settings + preprocessor: emd_sst + short_name: ts + additional_datasets: + - {dataset: HadISST, project: OBS, type: reanaly, version: 1, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_boxplot: + var_order: ['tas_land', 'sst', 'pr', 'psl', 'rsut', 'rlut', 'swcre', 'lwcre'] diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_diurnal_cycle.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_diurnal_cycle.yml new file mode 100644 index 0000000000..7f5cc8cfe3 --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_diurnal_cycle.yml @@ -0,0 +1,203 @@ +# ESMValTool +--- +documentation: + title: Benchmarking of a single model. + description: > + Benchmarking: diurnal cycle. 
+ authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + maintainer: + - lauer_axel + references: + - lauer24gmd + projects: + - dlrmabak + + +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, benchmark_dataset: true, alias: MIROC6} + + +preprocessors: + + pp_diurn_Tropics: + custom_order: true + regrid: + target_grid: 2x2 + scheme: linear + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + mask_landsea: + mask_out: land + resample_hours: + interval: 3 + offset: 1 + interpolate: true + area_statistics: + operator: mean + climate_statistics: + period: hourly + convert_units: + units: mm day-1 + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + pp_diurn_Tropics_metric: + custom_order: true + regrid: + target_grid: 2x2 + scheme: linear + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + mask_landsea: + mask_out: land + resample_hours: + interval: 3 + offset: 1 + interpolate: true + climate_statistics: + period: hourly + convert_units: + units: mm day-1 + distance_metric: + metric: rmse + coords: [longitude, latitude] + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + +diagnostics: + + diurnal_cycle: + description: Classical diurnal cycle plot including reference dataset. 
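In the preprocessors above, ``local_solar_time`` and ``resample_hours`` put all grid points on a common 3-hourly local-time axis, and ``climate_statistics: period: hourly`` then composites every time step with the same hour of day into one mean diurnal cycle. A minimal numpy sketch of this hour-of-day compositing with synthetic area-mean data (illustrative only):

    import numpy as np

    # Synthetic month of 3-hourly area-mean precipitation (mm day-1),
    # already expressed in local solar time.
    n_days = 31
    hours = np.tile(np.arange(1, 24, 3), n_days)              # 1, 4, ..., 22 h, repeated per day
    rng = np.random.default_rng(3)
    pr = 3.0 + 1.5 * np.sin(2 * np.pi * (hours - 15) / 24) + rng.gamma(1.0, 0.3, hours.size)

    # 'climate_statistics: period: hourly' averages all samples sharing the same hour of day.
    cycle_hours = np.unique(hours)
    diurnal_cycle = np.array([pr[hours == h].mean() for h in cycle_hours])
    print(dict(zip(cycle_hours.tolist(), np.round(diurnal_cycle, 2))))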
+ variables: + pr_tropics: &var_settings + project: CMIP6 + timerange: 2000/2000 + preprocessor: pp_diurn_Tropics + short_name: pr + exp: historical + mip: 3hr + ensemble: r1i1p1f1 + grid: gr + reference_dataset: ERA5 + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: 'v1', frequency: 1hr, + tier: 3, reference_for_metric: true, alias: ERA5} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + plot_filename: '{plot_type}_{real_name}_{mip}' + group_variables_by: variable_group + facet_used_for_labels: alias + plots: + diurnal_cycle: + annual_mean_kwargs: False + legend_kwargs: + loc: upper right + plot_kwargs: + 'MIROC6': + color: red + label: '{alias}' + linestyle: '-' + linewidth: 2 + zorder: 4 + ERA5: + color: black + label: '{dataset}' + linestyle: '-' + linewidth: 2 + zorder: 3 + MultiModelPercentile10: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + MultiModelPercentile90: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + default: + color: lightgray + label: null + linestyle: '-' + linewidth: 1 + zorder: 1 + + benchmarking_diurnal_cycle: + description: Create "benchmarking" diurnal cycle plot. + variables: + pr_tropics: + <<: *var_settings + preprocessor: pp_diurn_Tropics_metric + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + plot_filename: '{plot_type}_{real_name}_{mip}' + group_variables_by: variable_group + facet_used_for_labels: alias + plots: + benchmarking_diurnal_cycle: + legend_kwargs: + loc: upper right + plot_kwargs: + 'MIROC6': + color: red + label: '{alias}' + linestyle: '-' + linewidth: 2 + zorder: 4 + pyplot_kwargs: + title: '{short_name}' diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_maps.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_maps.yml new file mode 100644 index 0000000000..10ef7d0242 --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_maps.yml @@ -0,0 +1,117 @@ +# ESMValTool +--- +documentation: + title: Benchmarking of a single model. + description: > + Benchmarking: map plots. 
+ authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + maintainer: + - lauer_axel + references: + - lauer24gmd + projects: + - dlrmabak + + +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: BCC-ESM1, grid: gn} + - {dataset: CAMS-CSM1-0, grid: gn} + - {dataset: CanESM5, grid: gn} + - {dataset: CanESM5-CanOE, grid: gn, ensemble: r1i1p2f1} + - {dataset: CESM2, grid: gn} + - {dataset: CESM2-FV2, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM-FV2, grid: gn, institute: NCAR} + - {dataset: CIESM} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-CM6-1-HR, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: E3SM-1-0} + - {dataset: E3SM-1-1, institute: E3SM-Project} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-f3-L} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: FIO-ESM-2-0, grid: gn} + - {dataset: GFDL-ESM4, grid: gr1} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: GISS-E2-1-H, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: INM-CM4-8, grid: gr1} + - {dataset: INM-CM5-0, grid: gr1} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MCM-UA-1-0, grid: gn} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: NorESM2-LM, grid: gn, institute: NCC} + - {dataset: NorESM2-MM, grid: gn, institute: NCC} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, benchmark_dataset: true, alias: MIROC6} + + +preprocessors: + + pp_pr: + custom_order: true + regrid_time: + calendar: standard + regrid: + target_grid: 2x2 + scheme: linear + convert_units: + units: mm day-1 + distance_metric: + metric: rmse + coords: [time] + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + +diagnostics: + + benchmarking_maps: + description: Plot RMSE map. + variables: + pr: + timerange: '2000/2004' + preprocessor: pp_pr + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: GPCP-SG + additional_datasets: + - {dataset: GPCP-SG, project: OBS, type: atmos, version: 2.3, tier: 2, reference_for_metric: true, alias: GPCP-SG} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_map: + plot_kwargs: + default: + cmap: 'cool' + levels: [0.0, 0.5, 1.0, 2.0, 3.0, 5.0, 7.5, 10.0] diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_timeseries.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_timeseries.yml new file mode 100644 index 0000000000..5417deada4 --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_timeseries.yml @@ -0,0 +1,208 @@ +# ESMValTool +--- +documentation: + title: Benchmarking of a single model. + description: > + Benchmarking: time series. 
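In ``recipe_model_benchmarking_maps.yml`` above, ``distance_metric: rmse`` with ``coords: [time]`` yields one RMSE value per grid cell, and the multi-model 90th percentile of these RMSE maps provides a per-grid-cell threshold against which the benchmarked model can be judged. A numpy sketch of both steps with synthetic data (an illustration of the idea, not the preprocessor code):

    import numpy as np

    rng = np.random.default_rng(4)
    n_models, n_time, n_lat, n_lon = 20, 24, 45, 90

    # Synthetic monthly precipitation (mm day-1) for a reference dataset, the
    # benchmarked model and an ensemble of further models on a common grid.
    ref = rng.gamma(2.0, 1.5, (n_time, n_lat, n_lon))
    benchmark = ref + rng.normal(0.0, 1.0, ref.shape)
    ensemble = ref[np.newaxis] + rng.normal(0.0, 1.2, (n_models, n_time, n_lat, n_lon))

    # 'distance_metric: rmse' with 'coords: [time]' collapses the time axis,
    # leaving one RMSE value per grid cell (the field shown in the map plot).
    rmse_map = np.sqrt(((benchmark - ref) ** 2).mean(axis=0))
    ensemble_rmse = np.sqrt(((ensemble - ref) ** 2).mean(axis=1))

    # The 90th percentile across the ensemble (MultiModelPercentile90) gives a
    # per-grid-cell threshold for the benchmarked model's RMSE.
    p90_map = np.percentile(ensemble_rmse, 90, axis=0)
    print(f"{(rmse_map > p90_map).mean():.1%} of grid cells exceed the 90th-percentile RMSE")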
+ authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + maintainer: + - lauer_axel + references: + - lauer24gmd + projects: + - dlrmabak + + +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: BCC-ESM1, grid: gn} + - {dataset: CAMS-CSM1-0, grid: gn} + - {dataset: CanESM5, grid: gn} + - {dataset: CanESM5-CanOE, grid: gn, ensemble: r1i1p2f1} + - {dataset: CESM2, grid: gn} + - {dataset: CESM2-FV2, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM-FV2, grid: gn, institute: NCAR} + - {dataset: CIESM} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-CM6-1-HR, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: E3SM-1-0} + - {dataset: E3SM-1-1, institute: E3SM-Project} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-f3-L} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: FIO-ESM-2-0, grid: gn} + - {dataset: GFDL-ESM4, grid: gr1} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: GISS-E2-1-H, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: INM-CM4-8, grid: gr1} + - {dataset: INM-CM5-0, grid: gr1} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MCM-UA-1-0, grid: gn} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: NorESM2-LM, grid: gn, institute: NCC} + - {dataset: NorESM2-MM, grid: gn, institute: NCC} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, benchmark_dataset: true} + +preprocessors: + + pp_tas: + regrid: + target_grid: 2x2 + scheme: linear + anomalies: + period: month + reference: + start_year: 2000 + start_month: 1 + start_day: 1 + end_year: 2009 + end_month: 12 + end_day: 31 + area_statistics: + operator: mean + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, EMAC] + + pp_tas_metric: + custom_order: true + regrid_time: + calendar: standard + regrid: + target_grid: 2x2 + scheme: linear + anomalies: + period: month + reference: + start_year: 2000 + start_month: 1 + start_day: 1 + end_year: 2009 + end_month: 12 + end_day: 31 + distance_metric: + metric: rmse + coords: [longitude, latitude] + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, EMAC] + + +diagnostics: + + timeseries: + description: Plot "classical" time series of global mean anomalies including a reference dataset. 
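The ``anomalies`` step in the time series preprocessors above removes the mean seasonal cycle of the 2000-2009 reference period from every calendar month before the data are averaged globally or compared with the reference. A minimal numpy sketch with a synthetic monthly series (illustrative only):

    import numpy as np

    # Synthetic monthly global-mean temperature, 2000-2014 (180 months).
    months = np.arange(180)
    rng = np.random.default_rng(5)
    tas = 287.5 + 2.0 * np.sin(2 * np.pi * months / 12) + 0.01 * months + rng.normal(0.0, 0.2, months.size)

    # 'anomalies: period: month' with a 2000-2009 reference subtracts the mean
    # seasonal cycle of the first 120 months from every calendar month.
    month_of_year = months % 12
    reference = tas[:120]
    climatology = np.array([reference[month_of_year[:120] == m].mean() for m in range(12)])
    anomalies = tas - climatology[month_of_year]
    print(anomalies[:12].round(2))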
+ variables: + tas: + timerange: '2000/2014' + preprocessor: pp_tas + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: HadCRUT5 + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + timeseries: + annual_mean_kwargs: False + plot_kwargs: + MIROC6: + color: red + label: '{dataset}' + linestyle: '-' + linewidth: 2 + zorder: 4 + HadCRUT5: + color: black + label: '{dataset}' + linestyle: '-' + linewidth: 2 + zorder: 3 + MultiModelPercentile10: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + MultiModelPercentile90: + color: gray + label: '{dataset}' + linestyle: '--' + linewidth: 1 + zorder: 2 + default: + color: lightgray + label: null + linestyle: '-' + linewidth: 1 + zorder: 1 + + benchmarking_timeseries: + description: Plot "benchmarking" time series of global mean anomalies. + variables: + tas: + timerange: '2000/2014' + preprocessor: pp_tas_metric + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: HadCRUT5 + additional_datasets: + - {dataset: HadCRUT5, project: OBS, type: ground, version: 5.0.1.0-analysis, tier: 2, reference_for_metric: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_timeseries: + plot_kwargs: + annual_mean_kwargs: False + plot_kwargs: + MIROC6: + color: red + label: '{dataset}' + linestyle: '-' + linewidth: 1.5 + zorder: 3 diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_zonal.yml b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_zonal.yml new file mode 100644 index 0000000000..b82e603094 --- /dev/null +++ b/esmvaltool/recipes/model_evaluation/recipe_model_benchmarking_zonal.yml @@ -0,0 +1,124 @@ +# ESMValTool +--- +documentation: + title: Benchmarking of a single model. + description: > + Benchmarking: zonal mean plots. 
+ authors: + - lauer_axel + - bock_lisa + - hassler_birgit + - ruhe_lukas + - schlund_manuel + maintainer: + - lauer_axel + references: + - lauer24gmd + projects: + - dlrmabak + + +datasets: + - {dataset: ACCESS-CM2, grid: gn, institute: CSIRO-ARCCSS} + - {dataset: ACCESS-ESM1-5, grid: gn, institute: CSIRO} + - {dataset: AWI-CM-1-1-MR, grid: gn} + - {dataset: AWI-ESM-1-1-LR, grid: gn} + - {dataset: BCC-CSM2-MR, grid: gn} + - {dataset: BCC-ESM1, grid: gn} + - {dataset: CAMS-CSM1-0, grid: gn} + - {dataset: CanESM5, grid: gn} + - {dataset: CanESM5-CanOE, grid: gn, ensemble: r1i1p2f1} + - {dataset: CESM2, grid: gn} + - {dataset: CESM2-FV2, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM, grid: gn, institute: NCAR} + - {dataset: CESM2-WACCM-FV2, grid: gn, institute: NCAR} + - {dataset: CIESM} + - {dataset: CNRM-CM6-1, ensemble: r1i1p1f2} + - {dataset: CNRM-CM6-1-HR, ensemble: r1i1p1f2} + - {dataset: CNRM-ESM2-1, ensemble: r1i1p1f2} + - {dataset: E3SM-1-0} + - {dataset: E3SM-1-1, institute: E3SM-Project} + - {dataset: EC-Earth3-Veg} + - {dataset: FGOALS-f3-L} + - {dataset: FGOALS-g3, grid: gn} + - {dataset: GFDL-ESM4, grid: gr1} + - {dataset: GISS-E2-1-G, grid: gn} + - {dataset: GISS-E2-1-H, grid: gn} + - {dataset: HadGEM3-GC31-LL, ensemble: r1i1p1f3, grid: gn} + - {dataset: HadGEM3-GC31-MM, ensemble: r1i1p1f3, grid: gn} + - {dataset: INM-CM4-8, grid: gr1} + - {dataset: INM-CM5-0, grid: gr1} + - {dataset: IPSL-CM6A-LR} + - {dataset: KACE-1-0-G} + - {dataset: MCM-UA-1-0, grid: gn} + - {dataset: MIROC-ES2L, ensemble: r1i1p1f2, grid: gn} + - {dataset: MPI-ESM-1-2-HAM, grid: gn} + - {dataset: MPI-ESM1-2-HR, grid: gn} + - {dataset: MPI-ESM1-2-LR, grid: gn} + - {dataset: MRI-ESM2-0, grid: gn} + - {dataset: NESM3, grid: gn} + - {dataset: NorESM2-LM, grid: gn, institute: NCC} + - {dataset: NorESM2-MM, grid: gn, institute: NCC} + - {dataset: SAM0-UNICON, grid: gn} + - {dataset: UKESM1-0-LL, ensemble: r1i1p1f2, grid: gn} + # Dataset to be benchmarked + - {dataset: MIROC6, grid: gn, benchmark_dataset: true} + + +preprocessors: + pp_ta: + custom_order: true + regrid_time: + calendar: standard + extract_levels: + levels: {cmor_table: CMIP6, coordinate: plev27} + coordinate: air_pressure + scheme: linear + regrid: + target_grid: 2x2 + scheme: linear + mask_below_threshold: + threshold: 0.0 + climate_statistics: + operator: mean + zonal_statistics: + operator: mean + bias: + bias_type: absolute + multi_model_statistics: + span: overlap + statistics: + - operator: percentile + percent: 10 + - operator: percentile + percent: 90 + exclude: [reference_dataset, MIROC6] + + +diagnostics: + + benchmarking_zonal: + description: Plot zonal mean profile. 
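In ``pp_ta`` above, the climatological field is averaged zonally and ``bias: bias_type: absolute`` then subtracts the reference dataset, so the quantity plotted is a latitude-pressure cross-section of the temperature bias. A small numpy sketch of these two reductions with synthetic data (illustrative only):

    import numpy as np

    rng = np.random.default_rng(6)
    lat = np.linspace(-89.0, 89.0, 90)
    n_lev, n_lon = 27, 180

    # Synthetic climatological-mean air temperature on (plev, lat, lon):
    # warm tropics, cold poles, identical at all levels for simplicity.
    meridional = 210.0 + 60.0 * np.cos(np.deg2rad(lat))
    ref = np.broadcast_to(meridional[np.newaxis, :, np.newaxis], (n_lev, lat.size, n_lon)).copy()
    model = ref + rng.normal(0.5, 1.0, ref.shape)   # model with a slight warm bias

    # 'zonal_statistics: operator: mean' collapses longitude ...
    ref_zonal = ref.mean(axis=-1)
    model_zonal = model.mean(axis=-1)

    # ... and 'bias: bias_type: absolute' subtracts the reference, giving the
    # latitude-pressure cross-section shown in the benchmarking zonal plot.
    bias = model_zonal - ref_zonal
    print(bias.shape)  # (27, 90)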
+ variables: + ta: + timerange: '2000/2004' + preprocessor: pp_ta + project: CMIP6 + mip: Amon + exp: historical + ensemble: r1i1p1f1 + grid: gr + reference_dataset: ERA5 + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: 'v1', tier: 3, reference_for_metric: true, reference_for_bias: true} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + group_variables_by: variable_group + plots: + benchmarking_zonal: + plot_kwargs: + default: + cmap: 'bwr' + levels: [-5, -3, -2, -1, 0, 1, 2, 3, 5] diff --git a/esmvaltool/recipes/model_evaluation/recipe_model_evaluation_clouds_cycles.yml b/esmvaltool/recipes/model_evaluation/recipe_model_evaluation_clouds_cycles.yml index 8139a04dfc..431bb5246e 100644 --- a/esmvaltool/recipes/model_evaluation/recipe_model_evaluation_clouds_cycles.yml +++ b/esmvaltool/recipes/model_evaluation/recipe_model_evaluation_clouds_cycles.yml @@ -22,6 +22,9 @@ datasets: timerange_for_models: &time_period timerange: '2000/2014' # can be specified, this is just an example +timerange_diurnal: &time_diurnal_period + timerange: '20060101/20060201' # can be specified, this is just an example + preprocessors: @@ -31,6 +34,14 @@ preprocessors: climate_statistics: period: month + pp_Tropics: + <<: *global_settings + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + pp_SEPacific: <<: *global_settings extract_region: @@ -59,21 +70,76 @@ preprocessors: start_latitude: 45 end_latitude: 60 + pp_diurn_Tropics: + custom_order: true + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + area_statistics: + operator: mean + climate_statistics: + period: hourly + + pp_diurn_SEPacific: + custom_order: true + local_solar_time: + extract_region: + start_longitude: 265 + end_longitude: 275 + start_latitude: -25 + end_latitude: -5 + mask_landsea: + mask_out: land + area_statistics: + operator: mean + climate_statistics: + period: hourly + + pp_diurn_SouthernOcean: + custom_order: true + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -65 + end_latitude: -30 + mask_landsea: + mask_out: land + area_statistics: + operator: mean + climate_statistics: + period: hourly + + pp_diurn_StormTracks: + custom_order: true + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: 45 + end_latitude: 60 + area_statistics: + operator: mean + climate_statistics: + period: hourly + diagnostics: anncyc: description: Plot annual cycles including reference datasets. 
variables: - clt_global: &clt_settings + clt_tropics: &clt_settings + <<: *clt_settings <<: *time_period - preprocessor: pp_global + preprocessor: pp_Tropics short_name: clt mip: Amon additional_datasets: - {dataset: ESACCI-CLOUD, project: OBS, type: sat, version: AVHRR-AMPM-fv3.0, tier: 2} - clt_tropics: - <<: *clt_settings clt_sepacific: <<: *clt_settings preprocessor: pp_SEPacific @@ -83,15 +149,13 @@ diagnostics: clt_stormtracks: <<: *clt_settings preprocessor: pp_StormTracks - clivi_global: &clivi_settings + clivi_tropics: &clivi_settings <<: *time_period - preprocessor: pp_global + preprocessor: pp_Tropics short_name: clivi mip: Amon additional_datasets: - {dataset: ESACCI-CLOUD, project: OBS, type: sat, version: AVHRR-AMPM-fv3.0, tier: 2} - clivi_tropics: - <<: *clivi_settings clivi_sepacific: <<: *clivi_settings preprocessor: pp_SEPacific @@ -101,16 +165,14 @@ diagnostics: clivi_stormtracks: <<: *clivi_settings preprocessor: pp_StormTracks - lwp_global: &lwp_settings + lwp_tropics: &lwp_settings <<: *time_period - preprocessor: pp_global + preprocessor: pp_Tropics short_name: lwp derive: true mip: Amon additional_datasets: - {dataset: ESACCI-CLOUD, project: OBS, type: sat, version: AVHRR-AMPM-fv3.0, tier: 2} - lwp_tropics: - <<: *lwp_settings lwp_sepacific: <<: *lwp_settings preprocessor: pp_SEPacific @@ -120,16 +182,14 @@ diagnostics: lwp_stormtracks: <<: *lwp_settings preprocessor: pp_StormTracks - swcre_global: &swcre_settings + swcre_tropics: &swcre_settings <<: *time_period - preprocessor: pp_global + preprocessor: pp_Tropics short_name: swcre derive: true mip: Amon additional_datasets: - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.1, tier: 2} - swcre_tropics: - <<: *swcre_settings swcre_sepacific: <<: *swcre_settings preprocessor: pp_SEPacific @@ -139,16 +199,14 @@ diagnostics: swcre_stormtracks: <<: *swcre_settings preprocessor: pp_StormTracks - lwcre_global: &lwcre_settings + lwcre_tropics: &lwcre_settings <<: *time_period - preprocessor: pp_global + preprocessor: pp_Tropics short_name: lwcre derive: true mip: Amon additional_datasets: - {dataset: CERES-EBAF, project: OBS, type: sat, version: Ed4.1, tier: 2} - lwcre_tropics: - <<: *lwcre_settings lwcre_sepacific: <<: *lwcre_settings preprocessor: pp_SEPacific @@ -175,5 +233,46 @@ diagnostics: color: C1 ESACCI-CLOUD: color: black + CERES-EBAF: + color: black + pyplot_kwargs: + title: '{short_name}' + + diurncyc: + description: Example plot diurnal cycles including reference datasets. 
+ variables: + clt_tropics: &clt_diurn_settings + <<: *time_diurnal_period + preprocessor: pp_diurn_Tropics + short_name: clt + mip: 3hr + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: 'v1', frequency: 1hr, tier: 3} + clt_sepacific: + <<: *clt_diurn_settings + preprocessor: pp_diurn_SEPacific + clt_southerocean: + <<: *clt_diurn_settings + preprocessor: pp_diurn_SouthernOcean + clt_stormtracks: + <<: *clt_diurn_settings + preprocessor: pp_diurn_StormTracks + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + plot_filename: '{plot_type}_{real_name}_{mip}' + group_variables_by: variable_group + plots: + diurnal_cycle: + legend_kwargs: + loc: upper right + plot_kwargs: + MPI-ESM1-2-HR: + color: C0 + MPI-ESM1-2-LR: + color: C1 + ERA5: + color: black pyplot_kwargs: title: '{short_name}' diff --git a/esmvaltool/recipes/monitor/recipe_monitor.yml b/esmvaltool/recipes/monitor/recipe_monitor.yml index a37f186583..32b773211d 100644 --- a/esmvaltool/recipes/monitor/recipe_monitor.yml +++ b/esmvaltool/recipes/monitor/recipe_monitor.yml @@ -12,7 +12,7 @@ documentation: datasets: - - {project: CMIP6, dataset: EC-Earth3, exp: historical, ensemble: r1i1p1f1, start_year: 1850, end_year: 2014} + - {project: CMIP6, dataset: EC-Earth3, exp: historical, ensemble: r1i1p1f1} preprocessors: timeseries_regular: @@ -114,6 +114,22 @@ preprocessors: climate_statistics: period: month + pp_diurn_tropics: + custom_order: true + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + mask_landsea: + mask_out: land + local_solar_time: + area_statistics: + operator: mean + convert_units: + units: mm day-1 + + diagnostics: plot_timeseries_annual_cycle: description: "Plot time series and annualcycles" @@ -122,6 +138,7 @@ diagnostics: mip: Amon preprocessor: timeseries_regular grid: gr + timerange: 1850/2014 scripts: plot: &plot_default script: monitor/monitor.py @@ -140,12 +157,14 @@ diagnostics: mip: Omon preprocessor: nino3 grid: gn + timerange: 1850/2014 nino34: plot_name: 'Niño 3.4 index' short_name: tos mip: Omon preprocessor: nino34 grid: gn + timerange: 1850/2014 scripts: plot: <<: *plot_default @@ -161,18 +180,35 @@ diagnostics: preprocessor: mlotstnorth grid: gn plot_name: Mixed layer depth average above 50ºN + timerange: 1850/2014 mlotst-south: short_name: mlotst mip: Omon preprocessor: mlotstsouth grid: gn plot_name: Mixed layer depth average below 40ºS + timerange: 1850/2014 scripts: plot: <<: *plot_default plots: annual_cycle: {} + plot_diurnal_cycle: + description: "Plot diurnal cycle" + variables: + pr_tropics: + timerange: '20010101/20010201' # can be specified, this is just an example + preprocessor: pp_diurn_tropics + short_name: pr + mip: 3hr + grid: gr + scripts: + plot: + script: monitor/monitor.py + plots: + diurnal_cycle: {} + global_climatologies: description: "Plot map data" variables: @@ -180,76 +216,92 @@ diagnostics: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 ps: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 rsns: derive: true mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 rlns: derive: true mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 hfss: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 hfls: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 vas: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 pr: mip: Amon preprocessor: 
climatology_pr grid: gr + timerange: 1850/2014 evspsbl: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 ua200: short_name: ua mip: Amon preprocessor: climatology_200hPa grid: gr plot_name: Eastward Wind at 200 hPa + timerange: 1850/2014 ua500: short_name: ua mip: Amon preprocessor: climatology_500hPa grid: gr plot_name: Eastward Wind at 500 hPa + timerange: 1850/2014 zg200: short_name: zg mip: Amon preprocessor: climatology_200hPa grid: gr plot_name: Geopotential height at 200 hPa + timerange: 1850/2014 zg500: short_name: zg mip: Amon preprocessor: climatology_500hPa grid: gr plot_name: Geopotential height at 500 hPa + timerange: 1850/2014 tos: mip: Omon preprocessor: climatology grid: gn + timerange: 1850/2014 zos: mip: Omon preprocessor: climatology grid: gn + timerange: 1850/2014 sos: mip: Omon preprocessor: climatology grid: gn + timerange: 1850/2014 scripts: plot: <<: *plot_default @@ -265,18 +317,22 @@ diagnostics: mip: Amon preprocessor: climatology grid: gr + timerange: 1850/2014 pr: mip: Amon preprocessor: climatology_pr grid: gr + timerange: 1850/2014 tos: mip: Omon preprocessor: climatology grid: gn + timerange: 1850/2014 sos: mip: Omon grid: gn preprocessor: climatology + timerange: 1850/2014 scripts: plot: <<: *plot_default @@ -294,6 +350,7 @@ diagnostics: preprocessor: nao_djf eof_name: NAO as first EOF in DJF pc_name: NAO index as first PC in DJF + timerange: 1850/2014 sam: short_name: psl mip: Amon @@ -301,6 +358,7 @@ diagnostics: preprocessor: sam_jja eof_name: SAM as first EOF in JJA pc_name: SAM index as first PC in JJA + timerange: 1850/2014 scripts: eof: <<: *plot_default @@ -313,6 +371,7 @@ diagnostics: mip: SImon preprocessor: climatology grid: gn + timerange: 1850/2014 scripts: plot: @@ -333,6 +392,7 @@ diagnostics: mip: Omon preprocessor: clim_fma grid: gn + timerange: 1850/2014 scripts: plot: <<: *plot_default @@ -348,6 +408,7 @@ diagnostics: mip: Omon preprocessor: clim_aso grid: gn + timerange: 1850/2014 scripts: plot: <<: *plot_default diff --git a/esmvaltool/recipes/monitor/recipe_monitor_with_refs.yml b/esmvaltool/recipes/monitor/recipe_monitor_with_refs.yml index 681277310c..6550337afc 100644 --- a/esmvaltool/recipes/monitor/recipe_monitor_with_refs.yml +++ b/esmvaltool/recipes/monitor/recipe_monitor_with_refs.yml @@ -102,6 +102,19 @@ preprocessors: zonal_statistics: operator: mean + pp_diurn_Tropics: + custom_order: true + local_solar_time: + extract_region: + start_longitude: 0 + end_longitude: 360 + start_latitude: -30 + end_latitude: 30 + area_statistics: + operator: mean + climate_statistics: + period: hourly + diagnostics: @@ -255,3 +268,33 @@ diagnostics: common_cbar: true show_x_minor_ticks: false time_format: '%Y' + + plot_diurnal_cycle: + description: Example plot diurnal cycle including reference dataset. 
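The ``local_solar_time`` preprocessor used in ``pp_diurn_tropics`` above shifts each longitude from UTC to its local solar time, so that a given hour of the composited diurnal cycle refers to the same time of day everywhere. A rough numpy illustration of the underlying longitude-dependent shift with synthetic data (ESMValCore applies this to the cube's time coordinate; the rounding to whole 3-hourly steps here is a simplification):

    import numpy as np

    # Synthetic 3-hourly field on a (time, lon) grid, valid at UTC time steps.
    utc_hours = np.arange(0, 24, 3)
    lon = np.arange(0, 360, 30)
    data = np.random.default_rng(7).random((utc_hours.size, lon.size))

    # Local solar time differs from UTC by roughly 1 hour per 15 degrees longitude.
    offset = np.rint(lon / 15.0).astype(int)   # hours east of Greenwich
    shift = offset // 3                        # expressed in 3-hourly steps

    # Roll each longitude column so that a given index corresponds to the same
    # local solar time at all longitudes.
    local = np.empty_like(data)
    for j, s in enumerate(shift):
        local[:, j] = np.roll(data[:, j], s)
    print(local.shape)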
+ variables: + clt_tropics: + preprocessor: pp_diurn_Tropics + short_name: clt + mip: 3hr + timerange: '20060101/20060201' + additional_datasets: + - {dataset: ERA5, project: native6, type: reanaly, version: 'v1', frequency: 1hr, tier: 3} + scripts: + allplots: + script: monitor/multi_datasets.py + plot_folder: '{plot_dir}' + plot_filename: '{plot_type}_{real_name}_{mip}' + group_variables_by: variable_group + plots: + diurnal_cycle: + legend_kwargs: + loc: upper right + plot_kwargs: + MPI-ESM1-2-HR: + color: C0 + MPI-ESM1-2-LR: + color: C1 + ERA5: + color: black + pyplot_kwargs: + title: '{short_name}' diff --git a/esmvaltool/references/lauer24gmd.bibtex b/esmvaltool/references/lauer24gmd.bibtex new file mode 100644 index 0000000000..bbf1c28f50 --- /dev/null +++ b/esmvaltool/references/lauer24gmd.bibtex @@ -0,0 +1,13 @@ +@article{lauer24gmd, + doi = {}, + url = {}, + year = , + publisher = {European Geosciences Union}, + address = {Göttingen Germany}, + volume = {}, + number = {}, + pages = {}, + author = {Axel Lauer and Lisa Bock and Birgit Hassler and Patrick Jöckel and Lukas Ruhe and Manuel Schlund}, + title = {Monitoring and benchmarking Earth System Model simulations with ESMValTool v2.12.0}, + journal = {Geoscientific Model Development} +}