diff --git a/.github/workflows/commit-ci.yml b/.github/workflows/commit-ci.yml index b8d81e51f..24c685ebe 100644 --- a/.github/workflows/commit-ci.yml +++ b/.github/workflows/commit-ci.yml @@ -27,7 +27,7 @@ jobs: - uses: mamba-org/setup-micromamba@v1 with: - micromamba-version: latest + micromamba-version: '1.5.10-0' environment-name: ${{ github.event.repository.name }}-ubuntu-latest-312-${{ hashFiles('requirements/dev.txt') }} environment-file: requirements/base.txt create-args: >- diff --git a/.github/workflows/pr-ci.yml b/.github/workflows/pr-ci.yml index c989b30c5..cefeb8651 100644 --- a/.github/workflows/pr-ci.yml +++ b/.github/workflows/pr-ci.yml @@ -45,7 +45,7 @@ jobs: - uses: mamba-org/setup-micromamba@v1 with: - micromamba-version: latest + micromamba-version: '1.5.10-0' environment-name: ${{ github.event.repository.name }}-${{ matrix.os }}-3${{ matrix.py3version }}-${{ hashFiles('requirements/dev.txt') }} environment-file: requirements/base.txt create-args: >- @@ -108,7 +108,7 @@ jobs: - uses: actions/checkout@v4 - uses: mamba-org/setup-micromamba@v1 with: - micromamba-version: latest + micromamba-version: '1.5.10-0' environment-file: .github/workflows/pr-ci-pipbuild-environment.yml post-cleanup: all cache-environment: true @@ -127,4 +127,4 @@ jobs: TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }} with: package_name: calliope - version: ${{needs.pre-release-version.outputs.version}} \ No newline at end of file + version: ${{needs.pre-release-version.outputs.version}} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3bbb65f63..67779e53e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ default_language_version: repos: - repo: https://github.com/astral-sh/ruff-pre-commit # https://beta.ruff.rs/docs/usage/#github-action - rev: v0.5.6 + rev: v0.6.3 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/CHANGELOG.md b/CHANGELOG.md index db3b622a9..9aecfda03 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ |changed| cost expressions in math, to split out investment costs into the capital cost (`cost_investment`), annualised capital cost (`cost_investment_annualised`), fixed operation costs (`cost_operation_fixed`) and variable operation costs (`cost_operation_variable`, previously `cost_var`) (#645). +|new| Math has been removed from `model.math`, and can now be accessed via `model.math.data` (#639). + +|new| (non-NaN) Default values and data types for parameters appear in math documentation (if they appear in the model definition schema) (#677). + +|changed| `data_sources` -> `data_tables` and `data_sources.source` -> `data_tables.data`. +This change has occurred to avoid confusion between data "sources" and model energy "sources" (#673). + ## 0.7.0.dev4 (2024-09-10) ### User-facing changes @@ -60,6 +67,12 @@ Parameter titles from the model definition schema will also propagate to the mod ### Internal changes +|changed| `model._model_def_dict` has been removed. + +|new| `CalliopeMath` is a new helper class to handle math additions, including separate methods for pre-defined math, user-defined math and validation checks. + +|changed| `MathDocumentation` has been extracted from `Model`/`LatexBackend`, and now is a postprocessing module which can take models as input. + |new| `gurobipy` is a development dependency that will be added as an optional dependency to the conda-forge calliope feedstock recipe. 
|changed| Added any new math dicts defined with `calliope.Model.backend.add_[...](...)` to the backend math dict registry stored in `calliope.Model.backend.inputs.attrs["math"]`. diff --git a/docs/creating/data_sources.md b/docs/creating/data_tables.md similarity index 90% rename from docs/creating/data_sources.md rename to docs/creating/data_tables.md index f8735ed73..e549be039 100644 --- a/docs/creating/data_sources.md +++ b/docs/creating/data_tables.md @@ -1,17 +1,17 @@ -# Loading tabular data (`data_sources`) +# Loading tabular data (`data_tables`) We have chosen YAML syntax to define Calliope models as it is human-readable. However, when you have a large dataset, the YAML files can become large and ultimately not as readable as we would like. For instance, for parameters that vary in time we would have a list of 8760 values and timestamps to put in our YAML file! -Therefore, alongside your YAML model definition, you can load tabular data from CSV files (or from in-memory [pandas.DataFrame][] objects) under the `data_sources` top-level key. +Therefore, alongside your YAML model definition, you can load tabular data from CSV files (or from in-memory [pandas.DataFrame][] objects) under the `data_tables` top-level key. As of Calliope v0.7.0, this tabular data can be of _any_ kind. Prior to this, loading from file was limited to timeseries data. -The full syntax from loading tabular data can be found in the associated [schema][data-source-schema]. +The full syntax from loading tabular data can be found in the associated [schema][data-table-schema]. In brief it is: -* **source**: path to file or reference name for an in-memory object. +* **data**: path to file or reference name for an in-memory object. * **rows**: the dimension(s) in your table defined per row. * **columns**: the dimension(s) in your table defined per column. * **select**: values within dimensions that you want to select from your tabular data, discarding the rest. @@ -126,9 +126,9 @@ In this section we will show some examples of loading data and provide the equiv YAML definition to load data: ```yaml - data_sources: + data_tables: pv_capacity_factor_data: - source: data_sources/pv_resource.csv + data: data_tables/pv_resource.csv rows: timesteps add_dims: techs: pv @@ -181,9 +181,9 @@ In this section we will show some examples of loading data and provide the equiv YAML definition to load data: ```yaml - data_sources: + data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: [techs, parameters] ``` @@ -224,9 +224,9 @@ In this section we will show some examples of loading data and provide the equiv YAML definition to load data: ```yaml - data_sources: + data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: [techs, parameters] add_dims: costs: monetary @@ -272,7 +272,7 @@ In this section we will show some examples of loading data and provide the equiv 1. To limit repetition, we have defined [templates](templates.md) for our costs. !!! info "See also" - Our [data source loading tutorial][loading-tabular-data] has more examples of loading tabular data into your model. + Our [data table loading tutorial][loading-tabular-data] has more examples of loading tabular data into your model. 
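Because the `data_tables` rename (and the `source` → `data` key change) touches every table definition, it can help to sanity-check a migrated table interactively before running the full model. A minimal sketch, assuming a `model.yaml` containing the `data_tables` block from the example above with the CSV file stored alongside it (`cost_flow_cap` stands in for whichever parameter your table defines):

```python
import calliope

model = calliope.Model("model.yaml")
# Each parameter read from the table becomes an input array, indexed over
# the dimensions declared in `rows`, `columns` and `add_dims`.
print(model.inputs.cost_flow_cap.to_series().dropna())
```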
## Selecting dimension values and dropping dimensions @@ -290,9 +290,9 @@ Data in file: YAML definition to load only data from nodes 1 and 2: ```yaml -data_sources: +data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: [techs, parameters] columns: nodes select: @@ -312,9 +312,9 @@ You will also need to `drop` the dimension so that it doesn't appear in the fina YAML definition to load only data from scenario 1: ```yaml -data_sources: +data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: [techs, parameters] columns: scenarios select: @@ -322,12 +322,12 @@ data_sources: drop: scenarios ``` -You can then also tweak just one line of your data source YAML with an [override](scenarios.md) to point to your other scenario: +You can then also tweak just one line of your data table YAML with an [override](scenarios.md) to point to your other scenario: ```yaml override: switch_to_scenario2: - data_sources.tech_data.select.scenarios: scenario2 # (1)! + data_tables.tech_data.select.scenarios: scenario2 # (1)! ``` 1. We use the dot notation as a shorthand for [abbreviate nested dictionaries](yaml.md#abbreviated-nesting). @@ -348,9 +348,9 @@ For example, to define costs for the parameter `cost_flow_cap`: | tech3 | monetary | cost_flow_cap | 20 | 45 | 50 | ```yaml - data_sources: + data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: [techs, costs, parameters] columns: nodes ``` @@ -364,9 +364,9 @@ For example, to define costs for the parameter `cost_flow_cap`: | tech3 | 20 | 45 | 50 | ```yaml - data_sources: + data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: techs columns: nodes add_dims: @@ -384,9 +384,9 @@ Or to define the same timeseries source data for two technologies at different n | 2005-01-01 01:00 | 200 | 200 | ```yaml - data_sources: + data_tables: tech_data: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: timesteps columns: [nodes, techs, parameters] ``` @@ -401,16 +401,16 @@ Or to define the same timeseries source data for two technologies at different n | 2005-01-01 01:00 | 200 | ```yaml - data_sources: + data_tables: tech_data_1: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: timesteps add_dims: techs: tech1 nodes: node1 parameters: source_use_max tech_data_2: - source: data_sources/tech_data.csv + data: data_tables/tech_data.csv rows: timesteps add_dims: techs: tech2 @@ -420,10 +420,10 @@ Or to define the same timeseries source data for two technologies at different n ## Loading CSV files vs `pandas` dataframes -To load from CSV, set the filepath in `source` to point to your file. +To load from CSV, set the filepath in `data` to point to your file. This filepath can either be relative to your `model.yaml` file (as in the above examples) or an absolute path. -To load from a [pandas.DataFrame][], you can specify the `data_source_dfs` dictionary of objects when you initialise your model: +To load from a [pandas.DataFrame][], you can specify the `data_table_dfs` dictionary of objects when you initialise your model: ```python import calliope @@ -433,19 +433,19 @@ df2 = pd.DataFrame(...) 
model = calliope.Model( "path/to/model.yaml", - data_source_dfs={"data_source_1": df1, "data_source_2": df2} + data_table_dfs={"data_source_1": df1, "data_source_2": df2} ) ``` -And then you point to those dictionary keys in the `source` for your data source: +And then you point to those dictionary keys in the `data` for your data table: ```yaml -data_sources: +data_tables: ds1: - source: data_source_1 + data: data_source_1 ... ds2: - source: data_source_2 + data: data_source_2 ... ``` @@ -454,7 +454,7 @@ data_sources: Rows correspond to your dataframe index levels and columns to your dataframe column levels. You _cannot_ specify [pandas.Series][] objects. - Ensure you convert them to dataframes (`to_frame()`) before adding them to your data source dictionary. + Ensure you convert them to dataframes (`to_frame()`) before adding them to your data table dictionary. ## Important considerations @@ -468,8 +468,8 @@ This could be defined in `rows`, `columns`, or `add_dims`. 3. `add_dims` to add dimensions. This means you can technically select value "A" from dimensions `nodes`, then drop `nodes`, then add `nodes` back in with the value "B". This effectively replaces "A" with "B" on that dimension. -3. The order of tabular data loading is in the order you list the sources. -If a new table has data which clashes with preceding data sources, it will override that data. +3. The order of tabular data loading is in the order you list the tables. +If a new table has data which clashes with preceding tables, it will override that data. This may have unexpected results if the files have different dimensions as the dimensions will be broadcast to match each other. 4. CSV files must have `.csv` in their filename (even if compressed, e.g., `.csv.zip`). If they don't, they won't be picked up by Calliope. @@ -481,7 +481,7 @@ E.g., nodes: node1.techs: {tech1, tech2, tech3} node2.techs: {tech1, tech2} - data_sources: + data_tables: ... ``` 6. We process dimension data after loading it in according to a limited set of heuristics: diff --git a/docs/creating/index.md b/docs/creating/index.md index c8af05876..35ddefb2d 100644 --- a/docs/creating/index.md +++ b/docs/creating/index.md @@ -35,7 +35,7 @@ We distinguish between: - the model **definition** (your representation of a physical system in YAML). Model configuration is everything under the top-level YAML key [`config`](config.md). -Model definition is everything else, under the top-level YAML keys [`parameters`](parameters.md), [`techs`](techs.md), [`nodes`](nodes.md), [`templates`](templates.md), and [`data_sources`](data_sources.md). +Model definition is everything else, under the top-level YAML keys [`parameters`](parameters.md), [`techs`](techs.md), [`nodes`](nodes.md), [`templates`](templates.md), and [`data_tables`](data_tables.md). It is possible to define alternatives to the model configuration/definition that you can refer to when you initialise your model. These are defined under the top-level YAML keys [`scenarios` and `overrides`](scenarios.md). @@ -52,7 +52,7 @@ The layout of that directory typically looks roughly like this (`+` denotes dire + model_definition - nodes.yaml - techs.yaml - + data_sources + + data_tables - solar_resource.csv - electricity_demand.csv - model.yaml @@ -63,7 +63,7 @@ In the above example, the files `model.yaml`, `nodes.yaml` and `techs.yaml` toge This definition could be in one file, but it is more readable when split into multiple. We use the above layout in the example models. 
-Inside the `data_sources` directory, tabular data are stored as CSV files. +Inside the `data_tables` directory, tabular data are stored as CSV files. !!! note The easiest way to create a new model is to use the `calliope new` command, which makes a copy of one of the built-in examples models: @@ -85,4 +85,4 @@ The rest of this section discusses everything you need to know to set up a model - More details on the [model configuration](config.md). - The key parts of the model definition, first, the [technologies](techs.md), then, the [nodes](nodes.md), the locations in space where technologies can be placed. - How to use [technology and node templates](templates.md) to reduce repetition in the model definition. -- Other important features to be aware of when defining your model: defining [indexed parameters](parameters.md), i.e. parameter which are not indexed over technologies and nodes, [loading tabular data](data_sources.md), and defining [scenarios and overrides](scenarios.md). +- Other important features to be aware of when defining your model: defining [indexed parameters](parameters.md), i.e. parameter which are not indexed over technologies and nodes, [loading tabular data](data_tables.md), and defining [scenarios and overrides](scenarios.md). diff --git a/docs/examples/calliope_model_object.py b/docs/examples/calliope_model_object.py index 9236e6971..a4930f434 100644 --- a/docs/examples/calliope_model_object.py +++ b/docs/examples/calliope_model_object.py @@ -36,33 +36,10 @@ # Get information on the model print(m.info()) -# %% [markdown] -# ## Model definition dictionary -# -# `m._model_def_dict` is a python dictionary that holds all the data from the model definition YAML files, restructured into one dictionary. -# -# The underscore before the method indicates that it defaults to being hidden (i.e. you wouldn't see it by trying a tab auto-complete and it isn't documented) - -# %% -m._model_def_dict.keys() - -# %% [markdown] -# `techs` hold only the information about a technology that is specific to that node - -# %% -m._model_def_dict["techs"]["pv"] - -# %% [markdown] -# `nodes` hold only the information about a technology that is specific to that node - -# %% -m._model_def_dict["nodes"]["X2"]["techs"]["pv"] - # %% [markdown] # ## Model data # -# `m._model_data` is an xarray Dataset. -# Like `_model_def_dict` it is a hidden prperty of the Model as you are expected to access the data via the public property `inputs` +# `m._model_data` is an xarray Dataset, a hidden property of the Model as you are expected to access the data via the public property `inputs` # %% m.inputs diff --git a/docs/examples/loading_tabular_data.py b/docs/examples/loading_tabular_data.py index 8dcd3327e..35fe8398d 100644 --- a/docs/examples/loading_tabular_data.py +++ b/docs/examples/loading_tabular_data.py @@ -22,9 +22,10 @@ # %% from pathlib import Path -import calliope import pandas as pd +import calliope + calliope.set_log_verbosity("INFO", include_solver_output=False) # %% [markdown] @@ -177,7 +178,7 @@ # ## Defining data in the tabular CSV format # %% [markdown] -# We could have defined these same tables in CSV files and loaded them using `data-sources`. +# We could have defined these same tables in CSV files and loaded them using `data_tables`. # We don't yet have those CSV files ready, so we'll create them programmatically. # In practice, you would likely write these files using software like Excel. 
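The pattern relied on throughout this notebook is that each level of a pandas index maps to one Calliope dimension named in `rows` (and each column level to one named in `columns`). A minimal sketch of that round trip, with illustrative file name and values:

```python
import pandas as pd

# One index level per dimension that will later be declared as `rows: [techs, parameters]`.
data = pd.Series(
    {("supply_tech", "flow_cap_max"): 10, ("supply_tech", "flow_out_eff"): 0.9}
)
data.to_csv("tech_data.csv")
# Reading it back with both index columns restores the two dimensions.
print(pd.read_csv("tech_data.csv", index_col=[0, 1]))
```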
@@ -188,14 +189,14 @@ # Some are long and thin with all the dimensions grouped in each row (or the `index`), while others have dimensions grouped in the columns. # This is to show what is possible. # You might choose to always have long and thin data, or to always have certain dimensions in the rows and others in the columns. -# So long as you then define your data source correctly in the model definition, so that Calliope knows exactly how to process your data, it doesn't matter what shape it is stored in. +# So long as you then define your data table correctly in the model definition, so that Calliope knows exactly how to process your data, it doesn't matter what shape it is stored in. # %% [markdown] # First, we create a directory to hold the tabular data we are about to generate. # %% -data_source_path = Path(".") / "outputs" / "loading_tabular_data" -data_source_path.mkdir(parents=True, exist_ok=True) +data_table_path = Path(".") / "outputs" / "loading_tabular_data" +data_table_path.mkdir(parents=True, exist_ok=True) # %% [markdown] # Next we group together **technology data where no extra dimensions are needed**. @@ -219,7 +220,7 @@ }, } ) -tech_data.to_csv(data_source_path / "tech_data.csv") +tech_data.to_csv(data_table_path / "tech_data.csv") tech_data # %% [markdown] @@ -237,7 +238,7 @@ }, } ) -tech_timestep_data.to_csv(data_source_path / "tech_timestep_data.csv") +tech_timestep_data.to_csv(data_table_path / "tech_timestep_data.csv") tech_timestep_data # %% [markdown] @@ -257,7 +258,7 @@ ("transmission_tech", "carrier_out"): 1, } ) -tech_carrier_data.to_csv(data_source_path / "tech_carrier_data.csv") +tech_carrier_data.to_csv(data_table_path / "tech_carrier_data.csv") tech_carrier_data # %% [markdown] # And the **technology data with the `nodes` dimension**: @@ -265,7 +266,7 @@ tech_node_data = pd.Series( {("supply_tech", "B", "flow_cap_max"): 8, ("supply_tech", "A", "flow_cap_max"): 10} ) -tech_node_data.to_csv(data_source_path / "tech_node_data.csv") +tech_node_data.to_csv(data_table_path / "tech_node_data.csv") tech_node_data # %% [markdown] # Finally, we deal with the **technology data with the `costs` dimension**. 
@@ -280,40 +281,40 @@ "supply_tech": {"cost_flow_cap": 2}, } ) -tech_cost_data.to_csv(data_source_path / "tech_cost_data.csv") +tech_cost_data.to_csv(data_table_path / "tech_cost_data.csv") tech_cost_data # %% [markdown] -# Now our YAML model definition can simply link to each of the CSV files we created in the `data_sources`` section, instead of needing to define the data in YAML directly: +# Now our YAML model definition can simply link to each of the CSV files we created in the `data_tables` section, instead of needing to define the data in YAML directly: # # ```yaml -# data_sources: +# data_tables: # tech_data: -# source: outputs/loading_tabular_data/tech_data.csv +# data: outputs/loading_tabular_data/tech_data.csv # rows: parameters # columns: techs # tech_node_data: -# source: outputs/loading_tabular_data/tech_node_data.csv +# data: outputs/loading_tabular_data/tech_node_data.csv # rows: [techs, nodes, parameters] # tech_timestep_data: -# source: outputs/loading_tabular_data/tech_timestep_data.csv +# data: outputs/loading_tabular_data/tech_timestep_data.csv # rows: timesteps # columns: [techs, parameters] # tech_carrier_data: -# source: outputs/loading_tabular_data/tech_carrier_data.csv +# data: outputs/loading_tabular_data/tech_carrier_data.csv # rows: [techs, parameters] # add_dims: # carriers: electricity # tech_cost_data: -# source: outputs/loading_tabular_data/tech_cost_data.csv +# data: outputs/loading_tabular_data/tech_cost_data.csv # rows: parameters # columns: techs # add_dims: # costs: monetary # ``` # -# When loading data sources, assigning techs to nodes is done automatically to some extent. -# That is, if a tech is defined at a node in a data source (in this case, only for `supply_tech`), then Calliope assumes that this tech should be allowed to exist at the corresponding node. +# When loading data tables, assigning techs to nodes is done automatically to some extent. +# That is, if a tech is defined at a node in a data table (in this case, only for `supply_tech`), then Calliope assumes that this tech should be allowed to exist at the corresponding node. 
# Since it is easy to lose track of which parameters you've defined at nodes and which ones not, it is _much_ safer to explicitly define a list of technologies at each node in your YAML definition: # # ```yaml @@ -325,25 +326,25 @@ # %% model_def = calliope.AttrDict.from_yaml_string( """ -data_sources: +data_tables: tech_data: - source: outputs/loading_tabular_data/tech_data.csv + data: outputs/loading_tabular_data/tech_data.csv rows: parameters columns: techs tech_node_data: - source: outputs/loading_tabular_data/tech_node_data.csv + data: outputs/loading_tabular_data/tech_node_data.csv rows: [techs, nodes, parameters] tech_timestep_data: - source: outputs/loading_tabular_data/tech_timestep_data.csv + data: outputs/loading_tabular_data/tech_timestep_data.csv rows: timesteps columns: [techs, parameters] tech_carrier_data: - source: outputs/loading_tabular_data/tech_carrier_data.csv + data: outputs/loading_tabular_data/tech_carrier_data.csv rows: [techs, parameters] add_dims: carriers: electricity tech_cost_data: - source: outputs/loading_tabular_data/tech_cost_data.csv + data: outputs/loading_tabular_data/tech_cost_data.csv rows: parameters columns: techs add_dims: @@ -353,35 +354,35 @@ B.techs: {supply_tech, demand_tech} """ ) -model_from_data_sources = calliope.Model(model_def) +model_from_data_tables = calliope.Model(model_def) # %% [markdown] # ### Loading directly from in-memory dataframes # If you create your tabular data in an automated manner in a Python script, you may want to load it directly into Calliope rather than saving it to file first. -# You can do that by setting the data source as the name of a key in a dictionary that you supply when you load the model: +# You can do that by setting `data` as the name of a key in a dictionary that you supply when you load the model: # %% model_def = calliope.AttrDict.from_yaml_string( """ -data_sources: +data_tables: tech_data: - source: tech_data_df + data: tech_data_df rows: parameters columns: techs tech_node_data: - source: tech_node_data_df + data: tech_node_data_df rows: [techs, nodes, parameters] tech_timestep_data: - source: tech_timestep_data_df + data: tech_timestep_data_df rows: timesteps columns: [techs, parameters] tech_carrier_data: - source: tech_carrier_data_df + data: tech_carrier_data_df rows: [techs, parameters] add_dims: carriers: electricity tech_cost_data: - source: tech_cost_data_df + data: tech_cost_data_df rows: parameters columns: techs add_dims: @@ -391,9 +392,9 @@ B.techs: {supply_tech, demand_tech} """ ) -model_from_data_sources = calliope.Model( +model_from_data_tables = calliope.Model( model_def, - data_source_dfs={ + data_table_dfs={ "tech_data_df": tech_data, # NOTE: inputs must be dataframes. # pandas Series objects must therefore be converted: @@ -413,15 +414,15 @@ model_from_yaml.solve(force=True) # %% -model_from_data_sources.build(force=True) -model_from_data_sources.solve(force=True) +model_from_data_tables.build(force=True) +model_from_data_tables.solve(force=True) # %% [markdown] # **Input data**. Now we check if the input data are exactly the same across both models:" # %% for variable_name, variable_data in model_from_yaml.inputs.data_vars.items(): - if variable_data.broadcast_equals(model_from_data_sources.inputs[variable_name]): + if variable_data.broadcast_equals(model_from_data_tables.inputs[variable_name]): print(f"Great work, {variable_name} matches") else: print(f"!!! Something's wrong! 
{variable_name} doesn't match !!!") @@ -432,20 +433,20 @@ # %% for variable_name, variable_data in model_from_yaml.results.data_vars.items(): - if variable_data.broadcast_equals(model_from_data_sources.results[variable_name]): + if variable_data.broadcast_equals(model_from_data_tables.results[variable_name]): print(f"Great work, {variable_name} matches") else: print(f"!!! Something's wrong! {variable_name} doesn't match !!!") # %% [markdown] -# ## Mixing YAML and data source definitions +# ## Mixing YAML and data table definitions # It is possible to only put some data into CSV files and define the rest in YAML. # In fact, it almost always makes sense to build these hybrid definitions. For smaller models, you may only want to store timeseries data stored in CSV files and everything else in YAML: # # ```yaml -# data_sources: +# data_tables: # tech_timestep_data: -# source: outputs/loading_tabular_data/tech_timestep_data.csv +# data: outputs/loading_tabular_data/tech_timestep_data.csv # rows: timesteps # columns: [techs, parameters] # techs: @@ -498,13 +499,13 @@ # # # ```yaml -# data_sources: +# data_tables: # tech_timestep_data: -# source: outputs/loading_tabular_data/tech_timestep_data.csv +# data: outputs/loading_tabular_data/tech_timestep_data.csv # rows: timesteps # columns: [techs, parameters] # tech_cost_data: -# source: outputs/loading_tabular_data/tech_cost_data.csv +# data: outputs/loading_tabular_data/tech_cost_data.csv # rows: parameters # columns: techs # add_dims: @@ -547,31 +548,31 @@ # %% [markdown] # ## Overriding tabular data with YAML # -# Another reason to mix tabular data sources with YAML is to allow you to keep track of overrides to specific parts of the model definition. +# Another reason to mix tabular data with YAML is to allow you to keep track of overrides to specific parts of the model definition. 
# # For instance, we could change the number of a couple of parameters: # # # ```yaml -# data_sources: +# data_tables: # tech_data: -# source: outputs/loading_tabular_data/tech_data.csv +# data: outputs/loading_tabular_data/tech_data.csv # rows: parameters # columns: techs # tech_node_data: -# source: outputs/loading_tabular_data/tech_node_data.csv +# data: outputs/loading_tabular_data/tech_node_data.csv # rows: [techs, nodes, parameters] # tech_timestep_data: -# source: outputs/loading_tabular_data/tech_timestep_data.csv +# data: outputs/loading_tabular_data/tech_timestep_data.csv # rows: timesteps # columns: [techs, parameters] # tech_carrier_data: -# source: outputs/loading_tabular_data/tech_carrier_data.csv +# data: outputs/loading_tabular_data/tech_carrier_data.csv # rows: [techs, parameters] # add_dims: # carriers: electricity # tech_cost_data: -# source: outputs/loading_tabular_data/tech_cost_data.csv +# data: outputs/loading_tabular_data/tech_cost_data.csv # rows: parameters # columns: techs # add_dims: @@ -591,25 +592,25 @@ # %% model_def = calliope.AttrDict.from_yaml_string( """ -data_sources: +data_tables: tech_data: - source: outputs/loading_tabular_data/tech_data.csv + data: outputs/loading_tabular_data/tech_data.csv rows: parameters columns: techs tech_node_data: - source: outputs/loading_tabular_data/tech_node_data.csv + data: outputs/loading_tabular_data/tech_node_data.csv rows: [techs, nodes, parameters] tech_timestep_data: - source: outputs/loading_tabular_data/tech_timestep_data.csv + data: outputs/loading_tabular_data/tech_timestep_data.csv rows: timesteps columns: [techs, parameters] tech_carrier_data: - source: outputs/loading_tabular_data/tech_carrier_data.csv + data: outputs/loading_tabular_data/tech_carrier_data.csv rows: [techs, parameters] add_dims: carriers: electricity tech_cost_data: - source: outputs/loading_tabular_data/tech_cost_data.csv + data: outputs/loading_tabular_data/tech_cost_data.csv rows: parameters columns: techs add_dims: @@ -625,39 +626,39 @@ B.techs: {supply_tech, demand_tech} """ ) -model_from_data_sources_w_override = calliope.Model(model_def) +model_from_data_tables_w_override = calliope.Model(model_def) # Let's compare the two after overriding `flow_cap_max` -flow_cap_old = model_from_data_sources.inputs.flow_cap_max.to_series().dropna() +flow_cap_old = model_from_data_tables.inputs.flow_cap_max.to_series().dropna() flow_cap_new = ( - model_from_data_sources_w_override.inputs.flow_cap_max.to_series().dropna() + model_from_data_tables_w_override.inputs.flow_cap_max.to_series().dropna() ) pd.concat([flow_cap_old, flow_cap_new], axis=1, keys=["old", "new"]) # %% [markdown] -# We can also switch off technologies / nodes that would otherwise be introduced by our data sources: +# We can also switch off technologies / nodes that would otherwise be introduced by our data tables: # # # ```yaml -# data_sources: +# data_tables: # tech_data: -# source: outputs/loading_tabular_data/tech_data.csv +# data: outputs/loading_tabular_data/tech_data.csv # rows: parameters # columns: techs # tech_node_data: -# source: outputs/loading_tabular_data/tech_node_data.csv +# data: outputs/loading_tabular_data/tech_node_data.csv # rows: [techs, nodes, parameters] # tech_timestep_data: -# source: outputs/loading_tabular_data/tech_timestep_data.csv +# data: outputs/loading_tabular_data/tech_timestep_data.csv # rows: timesteps # columns: [techs, parameters] # tech_carrier_data: -# source: outputs/loading_tabular_data/tech_carrier_data.csv +# data: 
outputs/loading_tabular_data/tech_carrier_data.csv # rows: [techs, parameters] # add_dims: # carriers: electricity # tech_cost_data: -# source: outputs/loading_tabular_data/tech_cost_data.csv +# data: outputs/loading_tabular_data/tech_cost_data.csv # rows: parameters # columns: techs # add_dims: @@ -677,25 +678,25 @@ # %% model_def = calliope.AttrDict.from_yaml_string( """ -data_sources: +data_tables: tech_data: - source: outputs/loading_tabular_data/tech_data.csv + data: outputs/loading_tabular_data/tech_data.csv rows: parameters columns: techs tech_node_data: - source: outputs/loading_tabular_data/tech_node_data.csv + data: outputs/loading_tabular_data/tech_node_data.csv rows: [techs, nodes, parameters] tech_timestep_data: - source: outputs/loading_tabular_data/tech_timestep_data.csv + data: outputs/loading_tabular_data/tech_timestep_data.csv rows: timesteps columns: [techs, parameters] tech_carrier_data: - source: outputs/loading_tabular_data/tech_carrier_data.csv + data: outputs/loading_tabular_data/tech_carrier_data.csv rows: [techs, parameters] add_dims: carriers: electricity tech_cost_data: - source: outputs/loading_tabular_data/tech_cost_data.csv + data: outputs/loading_tabular_data/tech_cost_data.csv rows: parameters columns: techs add_dims: @@ -711,11 +712,13 @@ active: false """ ) -model_from_data_sources_w_deactivations = calliope.Model(model_def) +model_from_data_tables_w_deactivations = calliope.Model(model_def) # Let's compare the two after overriding `flow_cap_max` definition_matrix_old = ( - model_from_data_sources.inputs.definition_matrix.to_series().dropna() + model_from_data_tables.inputs.definition_matrix.to_series().dropna() +) +definition_matrix_new = ( + model_from_data_tables_w_deactivations.inputs.definition_matrix.to_series().dropna() ) -definition_matrix_new = model_from_data_sources_w_deactivations.inputs.definition_matrix.to_series().dropna() pd.concat([definition_matrix_old, definition_matrix_new], axis=1, keys=["old", "new"]) diff --git a/docs/examples/national_scale/notebook.py b/docs/examples/national_scale/notebook.py index e6de74a42..70e6d1140 100644 --- a/docs/examples/national_scale/notebook.py +++ b/docs/examples/national_scale/notebook.py @@ -18,10 +18,11 @@ # This notebook will show you how to load, build, solve, and examine the results of the national scale example model. 
# %% -import calliope import pandas as pd import plotly.express as px +import calliope + # We increase logging verbosity calliope.set_log_verbosity("INFO", include_solver_output=False) diff --git a/docs/examples/piecewise_constraints.py b/docs/examples/piecewise_constraints.py index ae6a44c58..064be53d7 100644 --- a/docs/examples/piecewise_constraints.py +++ b/docs/examples/piecewise_constraints.py @@ -21,10 +21,11 @@ # %% -import calliope import numpy as np import plotly.express as px +import calliope + calliope.set_log_verbosity("INFO", include_solver_output=False) # %% [markdown] @@ -67,22 +68,20 @@ # # %% -new_params = { - "parameters": { - "capacity_steps": { - "data": capacity_steps, - "index": [0, 1, 2, 3, 4], - "dims": "breakpoints", - }, - "cost_steps": { - "data": cost_steps, - "index": [0, 1, 2, 3, 4], - "dims": "breakpoints", - }, - } -} +new_params = f""" + parameters: + capacity_steps: + data: {capacity_steps} + index: [0, 1, 2, 3, 4] + dims: "breakpoints" + cost_steps: + data: {cost_steps} + index: [0, 1, 2, 3, 4] + dims: "breakpoints" +""" print(new_params) -m = calliope.examples.national_scale(override_dict=new_params) +new_params_as_dict = calliope.AttrDict.from_yaml_string(new_params) +m = calliope.examples.national_scale(override_dict=new_params_as_dict) # %% m.inputs.capacity_steps @@ -94,55 +93,48 @@ # ## Creating our piecewise constraint # # We create the piecewise constraint by linking decision variables to the piecewise curve we have created. -# In this example, we require a new decision variable for investment costs that can take on the value defined by the curve at a given value of `flow_cap`. - -# %% -m.math["variables"]["piecewise_cost_investment"] = { - "description": "Investment cost that increases monotonically", - "foreach": ["nodes", "techs", "carriers", "costs"], - "where": "[csp] in techs", - "bounds": {"min": 0, "max": np.inf}, - "default": 0, -} - -# %% [markdown] -# We also need to link that decision variable to our total cost calculation. +# In this example, we need: +# 1. a new decision variable for investment costs that can take on the value defined by the curve at a given value of `flow_cap`; +# 1. to link that decision variable to our total cost calculation; and +# 1. to define the piecewise constraint. # %% -# Before -m.math["global_expressions"]["cost_investment_flow_cap"]["equations"] - -# %% -# Updated - we split the equation into two expressions. 
-m.math["global_expressions"]["cost_investment_flow_cap"]["equations"] = [ - {"expression": "$cost_sum * flow_cap", "where": "NOT [csp] in techs"}, - {"expression": "piecewise_cost_investment", "where": "[csp] in techs"}, -] - -# %% [markdown] -# We then need to define the piecewise constraint: - -# %% -m.math["piecewise_constraints"]["csp_piecewise_costs"] = { - "description": "Set investment costs values along a piecewise curve using special ordered sets of type 2 (SOS2).", - "foreach": ["nodes", "techs", "carriers", "costs"], - "where": "piecewise_cost_investment", - "x_expression": "flow_cap", - "x_values": "capacity_steps", - "y_expression": "piecewise_cost_investment", - "y_values": "cost_steps", -} - -# %% [markdown] -# Then we can build our optimisation problem: +new_math = """ + variables: + piecewise_cost_investment: + description: "Investment cost that increases monotonically" + foreach: ["nodes", "techs", "carriers", "costs"] + where: "[csp] in techs" + bounds: + min: 0 + max: .inf + default: 0 + global_expressions: + cost_investment_flow_cap: + equations: + - expression: "$cost_sum * flow_cap" + where: "NOT [csp] in techs" + - expression: "piecewise_cost_investment" + where: "[csp] in techs" + piecewise_constraints: + csp_piecewise_costs: + description: "Set investment costs values along a piecewise curve using special ordered sets of type 2 (SOS2)." + foreach: ["nodes", "techs", "carriers", "costs"] + where: "piecewise_cost_investment" + x_expression: "flow_cap" + x_values: "capacity_steps" + y_expression: "piecewise_cost_investment" + y_values: "cost_steps" +""" # %% [markdown] # # Building and checking the optimisation problem # -# With our piecewise constraint defined, we can build our optimisation problem +# With our piecewise constraint defined, we can build our optimisation problem and inject this new math. # %% -m.build() +new_math_as_dict = calliope.AttrDict.from_yaml_string(new_math) +m.build(add_math_dict=new_math_as_dict) # %% [markdown] # And we can see that our piecewise constraint exists in the built optimisation problem "backend" @@ -189,65 +181,6 @@ ) fig.show() -# %% [markdown] -# ## YAML model definition -# We have updated the model parameters and math interactively in Python in this tutorial, the definition in YAML would look like: - -# %% [markdown] -# ### Math -# -# Saved as e.g., `csp_piecewise_math.yaml`. -# -# ```yaml -# variables: -# piecewise_cost_investment: -# description: Investment cost that increases monotonically -# foreach: [nodes, techs, carriers, costs] -# where: "[csp] in techs" -# bounds: -# min: 0 -# max: .inf -# default: 0 -# -# piecewise_constraints: -# csp_piecewise_costs: -# description: > -# Set investment costs values along a piecewise curve using special ordered sets of type 2 (SOS2). 
-# foreach: [nodes, techs, carriers, costs] -# where: "[csp] in techs" -# x_expression: flow_cap -# x_values: capacity_steps -# y_expression: piecewise_cost_investment -# y_values: cost_steps -# -# global_expressions: -# cost_investment_flow_cap.equations: -# - expression: "$cost_sum * flow_cap" -# where: "NOT [csp] in techs" -# - expression: "piecewise_cost_investment" -# where: "[csp] in techs" -# ``` - -# %% [markdown] -# ### Scenario definition -# -# Loaded into the national-scale example model with: `calliope.examples.national_scale(scenario="piecewise_csp_cost")` -# -# ```yaml -# overrides: -# piecewise_csp_cost: -# config.init.add_math: [csp_piecewise_math.yaml] -# parameters: -# capacity_steps: -# data: [0, 2500, 5000, 7500, 10000] -# index: [0, 1, 2, 3, 4] -# dims: "breakpoints" -# cost_steps: -# data: [0, 3.75e6, 6e6, 7.5e6, 8e6] -# index: [0, 1, 2, 3, 4] -# dims: "breakpoints" -# ``` - # %% [markdown] # ## Troubleshooting # diff --git a/docs/examples/urban_scale/index.md b/docs/examples/urban_scale/index.md index 318cc7f2e..499c9932e 100644 --- a/docs/examples/urban_scale/index.md +++ b/docs/examples/urban_scale/index.md @@ -1,6 +1,6 @@ --- demand: - file: "src/calliope/example_models/urban_scale/data_sources/demand.csv" + file: "src/calliope/example_models/urban_scale/data_tables/demand.csv" header: [0, 1] index_col: 0 --- @@ -50,10 +50,10 @@ The import section in our file looks like this: ### Referencing tabular data As of Calliope v0.7.0 it is possible to load tabular data completely separately from the YAML model definition. -To do this we reference data tables under the `data_sources` key: +To do this we reference data tables under the `data_tables` key: ```yaml ---8<-- "src/calliope/example_models/urban_scale/model.yaml:data-sources" +--8<-- "src/calliope/example_models/urban_scale/model.yaml:data-tables" ``` In the Calliope example models, we only load timeseries data from file, including for [energy demand](#demand-technologies), [electricity export price](#revenue-by-export) and [solar PV resource availability](#supply-technologies). @@ -63,7 +63,7 @@ As an example, the data in the energy demand CSV file looks like this: {{ read_csv(page.meta.demand.file, header=page.meta.demand.header, index_col=page.meta.demand.index_col) }} You'll notice that in each row there is reference to a timestep, and in each column to a technology and a node. -Therefore, we reference `timesteps` in our data source `rows` and `nodes` and `techs` in our data source columns. +Therefore, we reference `timesteps` in our data table _rows_, and `nodes` and `techs` in our data table _columns_. Since all the data refers to the one parameter `sink_use_equals`, we don't add that information in the CSV file, but instead add it on as a dimension when loading the file. !!! info diff --git a/docs/examples/urban_scale/notebook.py b/docs/examples/urban_scale/notebook.py index 9cc45947c..4374fce93 100644 --- a/docs/examples/urban_scale/notebook.py +++ b/docs/examples/urban_scale/notebook.py @@ -18,10 +18,11 @@ # This notebook will show you how to load, build, solve, and examine the results of the urban scale example model. 
# %% -import calliope import pandas as pd import plotly.express as px +import calliope + # We increase logging verbosity calliope.set_log_verbosity("INFO", include_solver_output=False) diff --git a/docs/hooks/dummy_model/model.yaml b/docs/hooks/dummy_model/model.yaml index d0c53a29e..e9c1976fa 100644 --- a/docs/hooks/dummy_model/model.yaml +++ b/docs/hooks/dummy_model/model.yaml @@ -2,8 +2,9 @@ overrides: storage_inter_cluster: config.init: name: inter-cluster storage - add_math: ["storage_inter_cluster"] time_cluster: cluster_days.csv + config.build: + add_math: ["storage_inter_cluster"] config.init.name: base @@ -16,29 +17,29 @@ techs: from: A to: B -data_sources: +data_tables: techs: - source: techs.csv + data: techs.csv rows: [techs, parameters] nodes: - source: nodes.csv + data: nodes.csv rows: parameters add_dims: nodes: [A, B] tech_carrier: - source: tech_carrier.csv + data: tech_carrier.csv rows: [techs, carriers, parameters] costs: - source: costs.csv + data: costs.csv rows: [techs, parameters] add_dims: costs: monetary time_varying: - source: time_varying.csv + data: time_varying.csv rows: timesteps add_dims: parameters: sink_use_max diff --git a/docs/hooks/generate_math_docs.py b/docs/hooks/generate_math_docs.py index 90b47f5cf..b513ebd25 100644 --- a/docs/hooks/generate_math_docs.py +++ b/docs/hooks/generate_math_docs.py @@ -8,9 +8,11 @@ import textwrap from pathlib import Path -import calliope from mkdocs.structure.files import File +import calliope +from calliope.postprocess.math_documentation import MathDocumentation + logger = logging.getLogger("mkdocs") TEMPDIR = tempfile.TemporaryDirectory() @@ -28,7 +30,7 @@ In the expressions, terms in **bold** font are [decision variables](#decision-variables) and terms in *italic* font are [parameters](#parameters). The [decision variables](#decision-variables) and [parameters](#parameters) are listed at the end of the page; they also refer back to the global expressions / constraints in which they are used. -Those parameters which are defined over time (`timesteps`) in the expressions can be defined by a user as a single, time invariant value, or as a timeseries that is [loaded from file or dataframe](../creating/data_sources.md). +Those parameters which are defined over time (`timesteps`) in the expressions can be defined by a user as a single, time invariant value, or as a timeseries that is [loaded from file or dataframe](../creating/data_tables.md). !!! note @@ -42,31 +44,33 @@ def on_files(files: list, config: dict, **kwargs): """Process documentation for pre-defined calliope math files.""" model_config = calliope.AttrDict.from_yaml(MODEL_PATH) - base_model = generate_base_math_model() + base_documentation = generate_base_math_documentation() write_file( - "base.yaml", + "plan.yaml", textwrap.dedent( """ Complete base mathematical formulation for a Calliope model. This math is _always_ applied but can be overridden with pre-defined additional math or [your own math][adding-your-own-math-to-a-model]. """ ), - base_model, + base_documentation, files, config, ) for override in model_config["overrides"].keys(): - custom_model = generate_custom_math_model(base_model, override) + custom_documentation = generate_custom_math_documentation( + base_documentation, override + ) write_file( f"{override}.yaml", textwrap.dedent( f""" - Pre-defined additional math to apply {custom_model.inputs.attrs['name']} math on top of the [base mathematical formulation][base-math]. 
+ Pre-defined additional math to apply {custom_documentation.name} math on top of the [base mathematical formulation][base-math]. This math is _only_ applied if referenced in the `config.init.add_math` list as `{override}`. """ ), - custom_model, + custom_documentation, files, config, ) @@ -77,7 +81,7 @@ def on_files(files: list, config: dict, **kwargs): def write_file( filename: str, description: str, - model: calliope.Model, + math_documentation: MathDocumentation, files: list[File], config: dict, ) -> None: @@ -86,12 +90,10 @@ def write_file( Args: filename (str): name of produced `.md` file. description (str): first paragraph after title. - model (calliope.Model): calliope model with the given math. + math_documentation (MathDocumentation): calliope math documentation. files (list[File]): math files to parse. config (dict): documentation configuration. """ - title = model.inputs.attrs["name"] + " math" - output_file = (Path("math") / filename).with_suffix(".md") output_full_filepath = Path(TEMPDIR.name) / output_file output_full_filepath.parent.mkdir(exist_ok=True, parents=True) @@ -122,7 +124,8 @@ def write_file( nav_reference["Pre-defined math"].append(output_file.as_posix()) - math_doc = model.math_documentation.write(format="md", mkdocs_features=True) + title = math_documentation.name + math_doc = math_documentation.write(format="md", mkdocs_features=True) file_to_download = Path("..") / filename output_full_filepath.write_text( PREPEND_SNIPPET.format( @@ -135,65 +138,67 @@ def write_file( ) -def generate_base_math_model() -> calliope.Model: - """Generate model with documentation for the base math. - - Args: - model_config (dict): Calliope model config. +def generate_base_math_documentation() -> MathDocumentation: + """Generate model documentation for the base math. Returns: - calliope.Model: Base math model to use in generating math docs. + MathDocumentation: model math documentation with latex backend. """ model = calliope.Model(model_definition=MODEL_PATH) - model.math_documentation.build() - return model + model.build() + return MathDocumentation(model) -def generate_custom_math_model( - base_model: calliope.Model, override: str -) -> calliope.Model: - """Generate model with documentation for a pre-defined math file. +def generate_custom_math_documentation( + base_documentation: MathDocumentation, override: str +) -> MathDocumentation: + """Generate model documentation for a pre-defined math file. Only the changes made relative to the base math will be shown. Args: - base_model (calliope.Model): Calliope model with only the base math applied. + base_documentation (MathDocumentation): model documentation with only the base math applied. override (str): Name of override to load from the list available in the model config. + + Returns: + MathDocumentation: model math documentation with latex backend. 
""" model = calliope.Model(model_definition=MODEL_PATH, scenario=override) + model.build() full_del = [] expr_del = [] - for component_group, component_group_dict in model.math.items(): + for component_group, component_group_dict in model.applied_math.data.items(): for name, component_dict in component_group_dict.items(): - if name in base_model.math[component_group]: + if name in base_documentation.math.data[component_group]: if not component_dict.get("active", True): expr_del.append(name) component_dict["description"] = "|REMOVED|" component_dict["active"] = True - elif base_model.math[component_group].get(name, {}) != component_dict: + elif ( + base_documentation.math.data[component_group].get(name, {}) + != component_dict + ): _add_to_description(component_dict, "|UPDATED|") else: full_del.append(name) else: _add_to_description(component_dict, "|NEW|") - model.math_documentation.build() + math_documentation = MathDocumentation(model) for key in expr_del: - model.math_documentation._instance._dataset[key].attrs["math_string"] = "" + math_documentation.backend._dataset[key].attrs["math_string"] = "" for key in full_del: - del model.math_documentation._instance._dataset[key] - for var in model.math_documentation._instance._dataset.values(): + del math_documentation.backend._dataset[key] + for var in math_documentation.backend._dataset.values(): var.attrs["references"] = var.attrs["references"].intersection( - model.math_documentation._instance._dataset.keys() + math_documentation.backend._dataset.keys() ) var.attrs["references"] = var.attrs["references"].difference(expr_del) - logger.info( - model.math_documentation._instance._dataset["carrier_in"].attrs["references"] - ) + logger.info(math_documentation.backend._dataset["carrier_in"].attrs["references"]) - return model + return math_documentation def _add_to_description(component_dict: dict, update_string: str) -> None: diff --git a/docs/hooks/generate_plots.py b/docs/hooks/generate_plots.py index 9a606adc6..256085f97 100644 --- a/docs/hooks/generate_plots.py +++ b/docs/hooks/generate_plots.py @@ -5,12 +5,13 @@ import tempfile from pathlib import Path -import calliope import pandas as pd import plotly.graph_objects as go import xarray as xr from mkdocs.structure.files import File +import calliope + TEMPDIR = tempfile.TemporaryDirectory() diff --git a/docs/hooks/generate_readable_schema.py b/docs/hooks/generate_readable_schema.py index 8b104351c..89ae232e5 100644 --- a/docs/hooks/generate_readable_schema.py +++ b/docs/hooks/generate_readable_schema.py @@ -12,16 +12,17 @@ from pathlib import Path import jsonschema2md -from calliope.util import schema from mkdocs.structure.files import File +from calliope.util import schema + TEMPDIR = tempfile.TemporaryDirectory() SCHEMAS = { "config_schema": schema.CONFIG_SCHEMA, "model_schema": schema.MODEL_SCHEMA, "math_schema": schema.MATH_SCHEMA, - "data_source_schema": schema.DATA_SOURCE_SCHEMA, + "data_table_schema": schema.DATA_TABLE_SCHEMA, } diff --git a/docs/migrating.md b/docs/migrating.md index 97ec44210..f361358d0 100644 --- a/docs/migrating.md +++ b/docs/migrating.md @@ -67,9 +67,9 @@ Instead, you define all your technology parameters at the same level. dims: costs ``` -### `file=`/`df=` → `data_sources` section +### `file=`/`df=` → `data_tables` section -`file=/df=` parameter values as references to timeseries data is replaced with loading tabular data at the top-level using the `data_sources` key. 
+`file=/df=` parameter values as references to timeseries data is replaced with loading tabular data at the top-level using the `data_tables` key. Assuming you have these two files: @@ -108,9 +108,9 @@ supply_file.csv: === "v0.7" ```yaml - data_sources: + data_tables: demand_data: - source: demand_file.csv + data: demand_file.csv rows: timesteps columns: nodes add_dims: @@ -118,7 +118,7 @@ supply_file.csv: parameters: sink_equals supply_data: - source: supply_file.csv + data: supply_file.csv rows: timesteps columns: nodes add_dims: @@ -127,7 +127,7 @@ supply_file.csv: ``` !!! info "See also" - [`data_sources` introduction](creating/data_sources.md); [`data_sources` tutorial][loading-tabular-data]. + [`data_tables` introduction](creating/data_tables.md); [`data_tables` tutorial][loading-tabular-data]. ### Negative → positive demand and carrier consumption values @@ -359,7 +359,7 @@ Instead of defining the binary trigger `force_resource` to enforce the productio If you want these resource uses to be upper or lower bounds, use the equivalent `_max`/`_min` parameters. -You can find an example of this change [above](#filedf-→-data_sources-section). +You can find an example of this change [above](#filedf-→-data_tables-section). ### `units` + `purchased` → `purchased_units` @@ -696,8 +696,8 @@ We have re-implemented all these constraints as tested additional math snippets, ### Configuration options -* With the [change in how timeseries data is defined](#filedf-→-data_sources-section), we have removed the reference to a `timeseries_data_path`. -Instead, data source filepaths should always be relative to the `model.yaml` file or they should be absolute paths. +* With the [change in how timeseries data is defined](#filedf-→-data_tables-section), we have removed the reference to a `timeseries_data_path`. +Instead, data table filepaths should always be relative to the `model.yaml` file or they should be absolute paths. * We have removed `run.relax_constraint` alongside [removing group constraints](#group-constraints). * We have removed `model.file_allowed`, which many users will not even know existed (it was a largely internal configuration option)! Instead, it is possible to index any parameter over the time dimension. @@ -963,11 +963,11 @@ nodes: ### Loading non-timeseries tabular data -With the [change in loading timeseries data](#filedf-→-data_sources-section), we have expanded loading of tabular data to allow any data input. +With the [change in loading timeseries data](#filedf-→-data_tables-section), we have expanded loading of tabular data to allow any data input. Technically, you can now define all your data in tables (although we would still recommend a mix of YAML and tabular model definition). !!! info "See also" - `data_sources` [introduction](creating/data_sources.md) and [tutorial][loading-tabular-data]. + `data_tables` [introduction](creating/data_tables.md) and [tutorial][loading-tabular-data]. ### YAML-based math syntax diff --git a/docs/pre_defined_math/index.md b/docs/pre_defined_math/index.md index 6a460298f..f919fed7e 100644 --- a/docs/pre_defined_math/index.md +++ b/docs/pre_defined_math/index.md @@ -3,7 +3,7 @@ As of Calliope version 0.7, the math used to build optimisation problems is stored in YAML files. The pre-defined math is a re-implementation of the formerly hardcoded math formulation in this YAML format. -The base math is _always_ applied to your model when you `build` the optimisation problem. 
+The pre-defined math for your chosen run [mode](../creating/config.md#configbuildmode) is _always_ applied to your model when you `build` the optimisation problem. We have also pre-defined some additional math, which you can _optionally_ load into your model. For instance, the [inter-cluster storage][inter-cluster-storage-math] math allows you to track storage levels in technologies more accurately when you are using timeseries clustering in your model. @@ -11,17 +11,12 @@ To load optional, pre-defined math on top of the base math, you can reference it ```yaml config: - init: + build: add_math: [storage_inter_cluster] ``` -When solving the model in a run mode other than `plan`, some pre-defined additional math will be applied automatically from a file of the same name (e.g., `spores` mode math is stored in [math/spores.yaml](https://github.com/calliope-project/calliope/blob/main/src/calliope/math/spores.yaml)). - -!!! note - - Additional math is applied in the order it appears in the `#!yaml config.init.add_math` list. - By default, any run mode math will be applied as the final step. - If you want to apply your own math *after* the run mode math, you should add the name of the run mode explicitly to the `#!yaml config.init.add_math` list, e.g., `#!yaml config.init.add_math: [operate, user_defined_math.yaml]`. +If you are running in the `plan` run mode, this will first apply all the [`plan`][base-math] pre-defined math, then the [`storage_inter_cluster`][inter-cluster-storage-math] pre-defined math. +All pre-defined math YAML files can be found in [`math` directory of the Calliope source code](https://github.com/calliope-project/calliope/blob/main/src/calliope/math/storage_inter_cluster.yaml). If you want to introduce new constraints, decision variables, or objectives, you can do so as part of the collection of YAML files describing your model. See the [user-defined math](../user_defined_math/index.md) section for an in-depth guide to applying your own math. diff --git a/docs/user_defined_math/components.md b/docs/user_defined_math/components.md index b180d3476..179667287 100644 --- a/docs/user_defined_math/components.md +++ b/docs/user_defined_math/components.md @@ -12,7 +12,7 @@ A decision variable in Calliope math looks like this: ```yaml variables: ---8<-- "src/calliope/math/base.yaml:variable" +--8<-- "src/calliope/math/plan.yaml:variable" ``` 1. It needs a unique name (`storage_cap` in the example above). @@ -48,7 +48,7 @@ To not clutter the objective function with all combinations of variables and par ```yaml global_expressions: ---8<-- "src/calliope/math/base.yaml:expression" +--8<-- "src/calliope/math/plan.yaml:expression" ``` Global expressions are by no means necessary to include, but can make more complex linear expressions easier to keep track of and can reduce post-processing requirements. @@ -74,7 +74,7 @@ Here is an example: ```yaml constraints: ---8<-- "src/calliope/math/base.yaml:constraint" +--8<-- "src/calliope/math/plan.yaml:constraint" ``` 1. It needs a unique name (`set_storage_initial` in the above example). @@ -138,7 +138,7 @@ With your constrained decision variables and a global expression that binds thes ```yaml objectives: ---8<-- "src/calliope/math/base.yaml:objective" +--8<-- "src/calliope/math/plan.yaml:objective" ``` 1. It needs a unique name. 
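Putting those components together: a constraint written in this syntax can be injected at build time through the `add_math_dict` argument documented below. A minimal sketch, in which the constraint and its `annual_flow_out_max` parameter are illustrative rather than pre-defined math:

```python
import calliope

new_math = calliope.AttrDict.from_yaml_string(
    """
    constraints:
      annual_flow_out_cap:
        description: "Cap each technology's total annual outflow (illustrative)."
        foreach: [nodes, techs, carriers]
        where: "annual_flow_out_max"
        equations:
          - expression: sum(flow_out, over=timesteps) <= annual_flow_out_max
    """
)
model = calliope.examples.national_scale()
model.build(add_math_dict=new_math)
```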
diff --git a/docs/user_defined_math/customise.md b/docs/user_defined_math/customise.md index bdc0c048c..19131ba8f 100644 --- a/docs/user_defined_math/customise.md +++ b/docs/user_defined_math/customise.md @@ -4,8 +4,8 @@ Once you understand the [math components](components.md) and the [formulation sy You can find examples of additional math that we have put together in our [math example gallery](examples/index.md). -Whenever you introduce your own math, it will be applied on top of the [base math][base-math]. -Therefore, you can include base math overrides as well as add new math. +Whenever you introduce your own math, it will be applied on top of the pre-defined math for your chosen run [mode](../creating/config.md#configbuildmode). +Therefore, you can override the pre-defined math as well as add new math. For example, you may want to introduce a timeseries parameter to the pre-defined `storage_max` constraint to limit maximum storage capacity on a per-timestep basis: ```yaml @@ -16,11 +16,16 @@ storage_max: The other elements of the `storage_max` constraints have not changed (`foreach`, `where`, ...), so we do not need to define them again when adding our own twist on the pre-defined math. -When defining your model, you can reference any number of YAML files containing the math you want to add in `config.init`. The paths are relative to your main model configuration file: +!!! note + + If you prefer to start from scratch with your math, you can ask Calliope to _not_ load the pre-defined math for your chosen run mode by setting `#!yaml config.build.ignore_mode_math: true`. + +When defining your model, you can reference any number of YAML files containing the math you want to add in `config.build`. +The paths are relative to your main model configuration file: ```yaml config: - init: + build: add_math: [my_new_math_1.yaml, my_new_math_2.yaml] ``` @@ -28,10 +33,22 @@ You can also define a mixture of your own math and the [pre-defined math](../pre ```yaml config: - init: + build: add_math: [my_new_math_1.yaml, storage_inter_cluster, my_new_math_2.md] ``` +Finally, when working in an interactive Python session, you can add math as a dictionary at build time: + +```python +model.build(add_math_dict={...}) +``` + +This will be applied after the pre-defined mode math and any math from file listed in `config.build.add_math`. + +!!! note + + When working in an interactive Python session, you can view the final math dictionary that has been applied to build the optimisation problem by inspecting `model.applied_math` after a successful call to `model.build()`. + ## Adding your parameters to the YAML schema Our YAML schemas are used to validate user inputs. @@ -90,9 +107,13 @@ You can write your model's mathematical formulation to view it in a rich-text fo To write a LaTeX, reStructuredText, or Markdown file that includes only the math valid for your model: ```python +from calliope.postprocess.math_documentation import MathDocumentation + model = calliope.Model("path/to/model.yaml") -model.math_documentation.build(include="valid") -model.math_documentation.write(filename="path/to/output/file.[tex|rst|md]") +model.build() + +math_documentation = MathDocumentation(model, include="valid") +math_documentation.write(filename="path/to/output/file.[tex|rst|md]") ``` You can then convert this to a PDF or HTML page using your renderer of choice. 
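Drawing the pieces of this hunk together, a minimal end-to-end sketch of the interactive workflow (the model path, output path, and the `storage_max` tweak are illustrative):

```python
import calliope
from calliope.postprocess.math_documentation import MathDocumentation

model = calliope.Model("path/to/model.yaml")  # illustrative path

# Math dictionaries passed at build time are applied after the pre-defined
# mode math and after any files listed in `config.build.add_math`.
model.build(
    add_math_dict={
        "constraints": {"storage_max": {"active": True}}  # illustrative tweak
    }
)

# The final math dictionary used to build the optimisation problem.
print(model.applied_math)

# Document only the math that is valid for this model.
math_documentation = MathDocumentation(model, include="valid")
math_documentation.write(filename="path/to/file.md")  # illustrative output path
```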
@@ -102,5 +123,5 @@ We recommend you only use HTML as the equations can become too long for a PDF pa You can add interactive elements to your documentation, if you are planning to host them online using MKDocs. This includes tabs to flip between rich-text math and the input YAML snippet, and dropdown lists for math component cross-references. - Just set the `mkdocs_features` argument to `True` in `model.math_documentation.write`. + Just set the `mkdocs_features` argument to `True` in `math_documentation.write`. We use this functionality in our [pre-defined math](../pre_defined_math/index.md). diff --git a/docs/user_defined_math/examples/max_time_varying.yaml b/docs/user_defined_math/examples/max_time_varying.yaml index 1ffc83f26..6dcb1cce9 100644 --- a/docs/user_defined_math/examples/max_time_varying.yaml +++ b/docs/user_defined_math/examples/max_time_varying.yaml @@ -4,7 +4,7 @@ # Set per-timestep variations in limits to out/inflows, which would otherwise be limited by a static value. # For example, `flow_cap` can be made to fluctuate per timestep above/below its rated value. # User-defined timeseries parameters need to be in the model inputs for these constraints. -# This can be achieved by defining them for each relevant technology in a CSV file and loading that as a [data source][loading-tabular-data-data_sources]. +# This can be achieved by defining them for each relevant technology in a CSV file and loading that as a [data table][loading-tabular-data-data_tables]. # # New indexed parameters: # @@ -29,4 +29,4 @@ constraints: equations: - expression: > flow_out <= - flow_cap_max_relative_per_ts * flow_cap * flow_out_parasitic_eff \ No newline at end of file + flow_cap_max_relative_per_ts * flow_cap * flow_out_parasitic_eff diff --git a/docs/user_defined_math/syntax.md b/docs/user_defined_math/syntax.md index e8bc6d38e..b2266d23f 100644 --- a/docs/user_defined_math/syntax.md +++ b/docs/user_defined_math/syntax.md @@ -107,7 +107,7 @@ If you are defining a `constraint`, then you also need to define a comparison op You do not need to define the sets of math components in expressions, unless you are actively "slicing" them. Behind the scenes, we will make sure that every relevant element of the defined `foreach` sets are matched together when applying the expression (we [merge the underlying xarray DataArrays](https://docs.xarray.dev/en/stable/user-guide/combining.html)). Slicing math components involves appending the component with square brackets that contain the slices, e.g. `flow_out[carriers=electricity, nodes=[A, B]]` will slice the `flow_out` decision variable to focus on `electricity` in its `carriers` dimension and only has two nodes (`A` and `B`) on its `nodes` dimension. -To find out what dimensions you can slice a component on, see your input data (`model.inputs`) for parameters and the definition for decision variables in your loaded math dictionary (`model.math.variables`). +To find out what dimensions you can slice a component on, see your input data (`model.inputs`) for parameters and the definition for decision variables in your math dictionary. 
## Helper functions diff --git a/mkdocs.yml b/mkdocs.yml index 1ea0bbad0..fa677f278 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -106,7 +106,7 @@ nav: - Nodes: creating/nodes.md - Inheriting from templates: creating/templates.md - Indexed parameters: creating/parameters.md - - Loading tabular data: creating/data_sources.md + - Loading data tables: creating/data_tables.md - Scenarios and overrides: creating/scenarios.md - Running a model: running.md - Analysing a model: analysing.md @@ -157,7 +157,7 @@ nav: - reference/api/exceptions.md - reference/api/logging.md - reference/config_schema.md - - reference/data_source_schema.md + - reference/data_table_schema.md - reference/model_schema.md - reference/math_schema.md - migrating.md diff --git a/src/calliope/attrdict.py b/src/calliope/attrdict.py index 819657a2c..bd94df7b8 100644 --- a/src/calliope/attrdict.py +++ b/src/calliope/attrdict.py @@ -113,6 +113,7 @@ def _resolve_imports( loaded: Self, resolve_imports: bool | str, base_path: str | Path | None = None, + allow_override: bool = False, ) -> Self: if ( isinstance(resolve_imports, bool) @@ -137,7 +138,7 @@ def _resolve_imports( path = relative_path(base_path, k) imported = cls.from_yaml(path) # loaded is added to imported (i.e. it takes precedence) - imported.union(loaded_dict) + imported.union(loaded_dict, allow_override=allow_override) loaded_dict = imported # 'import' key itself is no longer needed loaded_dict.del_key("import") @@ -151,7 +152,10 @@ def _resolve_imports( @classmethod def from_yaml( - cls, filename: str | Path, resolve_imports: bool | str = True + cls, + filename: str | Path, + resolve_imports: bool | str = True, + allow_override: bool = False, ) -> Self: """Returns an AttrDict initialized from the given path or file path. @@ -168,39 +172,54 @@ def from_yaml( filename (str | Path): YAML file. resolve_imports (bool | str, optional): top-level `import:` solving option. Defaults to True. + allow_override (bool, optional): whether or not to allow overrides of already defined keys. + Defaults to False. Returns: Self: constructed AttrDict """ filename = Path(filename) loaded = cls(_yaml_load(filename.read_text(encoding="utf-8"))) - loaded = cls._resolve_imports(loaded, resolve_imports, filename) + loaded = cls._resolve_imports( + loaded, resolve_imports, filename, allow_override=allow_override + ) return loaded @classmethod - def from_yaml_string(cls, string: str, resolve_imports: bool | str = True) -> Self: + def from_yaml_string( + cls, + string: str, + resolve_imports: bool | str = True, + allow_override: bool = False, + ) -> Self: """Returns an AttrDict initialized from the given string. Input string must be valid YAML. + If `resolve_imports` is True, top-level `import:` statements + are resolved recursively. + If `resolve_imports` is False, top-level `import:` statements + are treated like any other key and not further processed. + If `resolve_imports` is a string, such as `foobar`, import + statements underneath that key are resolved, i.e. `foobar.import:`. + When resolving import statements, anything defined locally + overrides definitions in the imported file. + Args: string (str): Valid YAML string. - resolve_imports (bool | str, optional): - If ``resolve_imports`` is True, top-level ``import:`` statements - are resolved recursively. - If ``resolve_imports is False, top-level ``import:`` statements - are treated like any other key and not further processed. 
- If ``resolve_imports`` is a string, such as ``foobar``, import - statements underneath that key are resolved, i.e. ``foobar.import:``. - When resolving import statements, anything defined locally - overrides definitions in the imported file. + resolve_imports (bool | str, optional): top-level `import:` solving option. + Defaults to True. + allow_override (bool, optional): whether or not to allow overrides of already defined keys. + Defaults to False. Returns: calliope.AttrDict: """ loaded = cls(_yaml_load(string)) - loaded = cls._resolve_imports(loaded, resolve_imports) + loaded = cls._resolve_imports( + loaded, resolve_imports, allow_override=allow_override + ) return loaded def set_key(self, key, value): diff --git a/src/calliope/backend/__init__.py b/src/calliope/backend/__init__.py index 6b7157433..d37395d80 100644 --- a/src/calliope/backend/__init__.py +++ b/src/calliope/backend/__init__.py @@ -5,23 +5,28 @@ import xarray as xr from calliope.backend.gurobi_backend_model import GurobiBackendModel -from calliope.backend.latex_backend_model import MathDocumentation +from calliope.backend.latex_backend_model import ( + ALLOWED_MATH_FILE_FORMATS, + LatexBackendModel, +) from calliope.backend.parsing import ParsedBackendComponent from calliope.backend.pyomo_backend_model import PyomoBackendModel from calliope.exceptions import BackendError - -MODEL_BACKENDS = ("pyomo",) +from calliope.preprocess import CalliopeMath if TYPE_CHECKING: from calliope.backend.backend_model import BackendModel -def get_model_backend(name: str, data: xr.Dataset, **kwargs) -> "BackendModel": +def get_model_backend( + name: str, data: xr.Dataset, math: CalliopeMath, **kwargs +) -> "BackendModel": """Assign a backend using the given configuration. Args: name (str): name of the backend to use. data (Dataset): model data for the backend. + math (CalliopeMath): Calliope math. **kwargs: backend keyword arguments corresponding to model.config.build. Raises: @@ -32,8 +37,8 @@ def get_model_backend(name: str, data: xr.Dataset, **kwargs) -> "BackendModel": """ match name: case "pyomo": - return PyomoBackendModel(data, **kwargs) + return PyomoBackendModel(data, math, **kwargs) case "gurobi": - return GurobiBackendModel(data, **kwargs) + return GurobiBackendModel(data, math, **kwargs) case _: raise BackendError(f"Incorrect backend '{name}' requested.") diff --git a/src/calliope/backend/backend_model.py b/src/calliope/backend/backend_model.py index ab0cd78f9..c52d74abb 100644 --- a/src/calliope/backend/backend_model.py +++ b/src/calliope/backend/backend_model.py @@ -4,7 +4,6 @@ from __future__ import annotations -import importlib import logging import time import typing @@ -32,12 +31,11 @@ from calliope.backend import helper_functions, parsing from calliope.exceptions import warn as model_warn from calliope.io import load_config +from calliope.preprocess.model_math import ORDERED_COMPONENTS_T, CalliopeMath from calliope.util.schema import ( - MATH_SCHEMA, MODEL_SCHEMA, extract_from_schema, update_then_validate_config, - validate_dict, ) if TYPE_CHECKING: @@ -46,14 +44,8 @@ from calliope.exceptions import BackendError T = TypeVar("T") -_COMPONENTS_T = Literal[ - "parameters", - "variables", - "global_expressions", - "constraints", - "piecewise_constraints", - "objectives", -] +ALL_COMPONENTS_T = Literal["parameters", ORDERED_COMPONENTS_T] + LOGGER = logging.getLogger(__name__) @@ -61,11 +53,12 @@ class BackendModelGenerator(ABC): """Helper class for backends.""" - _VALID_COMPONENTS: tuple[_COMPONENTS_T, ...] 
= typing.get_args(_COMPONENTS_T) +    _VALID_COMPONENTS: tuple[ALL_COMPONENTS_T, ...] = typing.get_args(ALL_COMPONENTS_T) _COMPONENT_ATTR_METADATA = [ "description", "unit", "default", +        "type", "title", "math_repr", "original_dtype", @@ -74,12 +67,14 @@ _PARAM_TITLES = extract_from_schema(MODEL_SCHEMA, "title") _PARAM_DESCRIPTIONS = extract_from_schema(MODEL_SCHEMA, "description") _PARAM_UNITS = extract_from_schema(MODEL_SCHEMA, "x-unit") +    _PARAM_TYPE = extract_from_schema(MODEL_SCHEMA, "x-type") -    def __init__(self, inputs: xr.Dataset, **kwargs): +    def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs): """Abstract base class to build a representation of the optimisation problem. Args: inputs (xr.Dataset): Calliope model data. +            math (CalliopeMath): Calliope math. **kwargs (Any): build configuration overrides. """ self._dataset = xr.Dataset() @@ -88,10 +83,12 @@ def __init__(self, inputs: xr.Dataset, **kwargs): self.inputs.attrs["config"]["build"] = update_then_validate_config( "build", self.inputs.attrs["config"], **kwargs ) -        self._check_inputs() - +        self.math: CalliopeMath = deepcopy(math) self._solve_logger = logging.getLogger(__name__ + ".") +        self._check_inputs() +        self.math.validate() + @abstractmethod def add_parameter( self, parameter_name: str, parameter_values: xr.DataArray, default: Any = np.nan @@ -178,7 +175,7 @@ def add_objective( def log( self, -        component_type: _COMPONENTS_T, +        component_type: ALL_COMPONENTS_T, component_name: str, message: str, level: Literal["info", "warning", "debug", "error", "critical"] = "debug", @@ -186,7 +183,7 @@ def log( """Log to module-level logger with some prettification of the message. Args: -            component_type (_COMPONENTS_T): type of component. +            component_type (ALL_COMPONENTS_T): type of component. component_name (str): name of the component. message (str): message to log. level (Literal["info", "warning", "debug", "error", "critical"], optional): log level. Defaults to "debug". @@ -221,20 +218,39 @@ def _check_inputs(self): check_results["warn"], check_results["fail"] ) -    def add_all_math(self): -        """Parse and all the math stored in the input data.""" -        self._add_run_mode_math() +    def _validate_math_string_parsing(self) -> None: +        """Validate that `expression` and `where` strings of the math dictionary can be successfully parsed. + +        NOTE: strings are not checked for evaluation validity. +        Evaluation issues will be raised only on adding a component to the backend. +        """ +        validation_errors: dict = dict() +        for component_group in typing.get_args(ORDERED_COMPONENTS_T): +            for name, dict_ in self.math.data[component_group].items(): +                parsed = parsing.ParsedBackendComponent(component_group, name, dict_) +                parsed.parse_top_level_where(errors="ignore") +                parsed.parse_equations(self.valid_component_names, errors="ignore") +                if not parsed._is_valid: +                    validation_errors[f"{component_group}:{name}"] = parsed._errors + +        if validation_errors: +            exceptions.print_warnings_and_raise_errors( +                during="math string parsing (marker indicates where parsing stopped, but may not point to the root cause of the issue)", +                errors=validation_errors, +            ) + +        LOGGER.info("Optimisation Model | Validated math strings.") + +    def add_optimisation_components(self) -> None: +        """Parse math and inputs and set optimisation problem.""" # The order of adding components matters! # 1. Variables, 2. Global Expressions, 3. Constraints, 4.
Objectives - for components in [ - "variables", - "global_expressions", - "constraints", - "piecewise_constraints", - "objectives", - ]: + self._add_all_inputs_as_parameters() + if self.inputs.attrs["config"]["build"]["pre_validate_math_strings"]: + self._validate_math_string_parsing() + for components in typing.get_args(ORDERED_COMPONENTS_T): component = components.removesuffix("s") - for name, dict_ in self.inputs.math[components].items(): + for name, dict_ in self.math.data[components].items(): start = time.time() getattr(self, f"add_{component}")(name, dict_) end = time.time() - start @@ -243,38 +259,12 @@ def add_all_math(self): ) LOGGER.info(f"Optimisation Model | {components} | Generated.") - def _add_run_mode_math(self) -> None: - """If not given in the add_math list, override model math with run mode math.""" - # FIXME: available modes should not be hardcoded here. They should come from a YAML schema. - mode = self.inputs.attrs["config"].build.mode - add_math = self.inputs.attrs["applied_additional_math"] - not_run_mode = {"plan", "operate", "spores"}.difference([mode]) - run_mode_mismatch = not_run_mode.intersection(add_math) - if run_mode_mismatch: - exceptions.warn( - f"Running in {mode} mode, but run mode(s) {run_mode_mismatch} " - "math being loaded from file via the model configuration" - ) - - if mode != "plan" and mode not in add_math: - LOGGER.debug(f"Updating math formulation with {mode} mode math.") - filepath = importlib.resources.files("calliope") / "math" / f"{mode}.yaml" - self.inputs.math.union(AttrDict.from_yaml(filepath), allow_override=True) - - validate_dict(self.inputs.math, MATH_SCHEMA, "math") - def _add_component( self, name: str, component_dict: Tp, component_setter: Callable, - component_type: Literal[ - "variables", - "global_expressions", - "constraints", - "piecewise_constraints", - "objectives", - ], + component_type: ORDERED_COMPONENTS_T, break_early: bool = True, ) -> parsing.ParsedBackendComponent | None: """Generalised function to add a optimisation problem component array to the model. @@ -284,7 +274,7 @@ def _add_component( this name must be available in the input math provided on initialising the class. component_dict (Tp): unparsed YAML dictionary configuration. component_setter (Callable): function to combine evaluated xarray DataArrays into backend component objects. - component_type (Literal["variables", "global_expressions", "constraints", "objectives"]): + component_type (Literal["variables", "global_expressions", "constraints", "piecewise_constraints", "objectives"]): type of the added component. break_early (bool, optional): break if the component is not active. Defaults to True. @@ -297,8 +287,8 @@ def _add_component( """ references: set[str] = set() - if name not in self.inputs.math.get(component_type, {}): - self.inputs.math.set_key(f"{component_type}.name", component_dict) + if name not in self.math.data[component_type]: + self.math.add(AttrDict({f"{component_type}.{name}": component_dict})) if break_early and not component_dict.get("active", True): self.log( @@ -368,7 +358,7 @@ def _add_component( return parsed_component @abstractmethod - def delete_component(self, key: str, component_type: _COMPONENTS_T) -> None: + def delete_component(self, key: str, component_type: ALL_COMPONENTS_T) -> None: """Delete a list object from the backend model object. 
Args: @@ -377,7 +367,7 @@ def delete_component(self, key: str, component_type: _COMPONENTS_T) -> None: """ @abstractmethod - def _create_obj_list(self, key: str, component_type: _COMPONENTS_T) -> None: + def _create_obj_list(self, key: str, component_type: ALL_COMPONENTS_T) -> None: """Attach an empty list object to the backend model object. The attachment may be a backend-specific subclass of a standard list object. @@ -430,7 +420,7 @@ def _add_to_dataset( self, name: str, da: xr.DataArray, - obj_type: _COMPONENTS_T, + obj_type: ALL_COMPONENTS_T, unparsed_dict: parsing.UNPARSED_DICTS | dict, references: set | None = None, ): @@ -439,7 +429,7 @@ def _add_to_dataset( Args: name (str): Name of entry in dataset. da (xr.DataArray): Data to add. - obj_type (_COMPONENTS_T): Type of backend objects in the array. + obj_type (ALL_COMPONENTS_T): Type of backend objects in the array. unparsed_dict (parsing.UNPARSED_DICTS | dict): Dictionary describing the object being added, from which descriptor attributes will be extracted and added to the array attributes. @@ -535,7 +525,7 @@ def _apply_func( da = tuple(arr.fillna(np.nan) for arr in da) return da - def _raise_error_on_preexistence(self, key: str, obj_type: _COMPONENTS_T): + def _raise_error_on_preexistence(self, key: str, obj_type: ALL_COMPONENTS_T): """Detect if preexistance errors are present the dataset. We do not allow any overlap of backend object names since they all have to @@ -544,7 +534,7 @@ def _raise_error_on_preexistence(self, key: str, obj_type: _COMPONENTS_T): Args: key (str): Backend object name - obj_type (Literal["variables", "constraints", "objectives", "parameters", "expressions"]): Object type. + obj_type (ALL_COMPONENTS_T): Object type. Raises: BackendError: if `key` already exists in the backend model @@ -607,7 +597,7 @@ def _filter(val): in_math = set( name for component in ["variables", "global_expressions"] - for name in self.inputs.math[component].keys() + for name in self.math.data[component] ) return in_data.union(in_math) @@ -615,15 +605,18 @@ def _filter(val): class BackendModel(BackendModelGenerator, Generic[T]): """Calliope's backend model functionality.""" - def __init__(self, inputs: xr.Dataset, instance: T, **kwargs) -> None: + def __init__( + self, inputs: xr.Dataset, math: CalliopeMath, instance: T, **kwargs + ) -> None: """Abstract base class to build backend models that interface with solvers. Args: inputs (xr.Dataset): Calliope model data. + math (CalliopeMath): Calliope math. instance (T): Interface model instance. **kwargs: build configuration overrides. """ - super().__init__(inputs, **kwargs) + super().__init__(inputs, math, **kwargs) self._instance = instance self.shadow_prices: ShadowPrices self._has_verbose_strings: bool = False @@ -1026,19 +1019,13 @@ def _rebuild_references(self, references: set[str]) -> None: Args: references (set[str]): names of optimisation problem components. """ - ordered_components = [ - "variables", - "global_expressions", - "constraints", - "objectives", - ] - for component in ordered_components: + for component in typing.get_args(ORDERED_COMPONENTS_T): # Rebuild references in the order they are found in the backend dataset # which should correspond to the order they were added to the optimisation problem. 
refs = [k for k in getattr(self, component).data_vars if k in references] for ref in refs: self.delete_component(ref, component) - dict_ = self.inputs.attrs["math"][component][ref] + dict_ = self.math.data[component][ref] getattr(self, "add_" + component.removesuffix("s"))(ref, dict_) def _get_component(self, name: str, component_group: str) -> xr.DataArray: diff --git a/src/calliope/backend/expression_parser.py b/src/calliope/backend/expression_parser.py index 491399424..0bd4c676d 100644 --- a/src/calliope/backend/expression_parser.py +++ b/src/calliope/backend/expression_parser.py @@ -37,6 +37,7 @@ from typing import TYPE_CHECKING, Any, Literal, overload import numpy as np +import pandas as pd import pyparsing as pp import xarray as xr from typing_extensions import NotRequired, TypedDict, Unpack @@ -788,7 +789,7 @@ def as_array(self) -> xr.DataArray: # noqa: D102, override evaluated = backend_interface._dataset[self.name] except KeyError: evaluated = xr.DataArray(self.name, attrs={"obj_type": "string"}) - if "default" in evaluated.attrs: + if "default" in evaluated.attrs and pd.notna(evaluated.attrs["default"]): evaluated = evaluated.fillna(evaluated.attrs["default"]) self.eval_attrs["references"].add(self.name) diff --git a/src/calliope/backend/gurobi_backend_model.py b/src/calliope/backend/gurobi_backend_model.py index e5f8096d5..2d2e0a485 100644 --- a/src/calliope/backend/gurobi_backend_model.py +++ b/src/calliope/backend/gurobi_backend_model.py @@ -17,6 +17,7 @@ from calliope.backend import backend_model, parsing from calliope.exceptions import BackendError, BackendWarning from calliope.exceptions import warn as model_warn +from calliope.preprocess import CalliopeMath if importlib.util.find_spec("gurobipy") is not None: import gurobipy @@ -40,23 +41,22 @@ class GurobiBackendModel(backend_model.BackendModel): """gurobipy-specific backend functionality.""" - def __init__(self, inputs: xr.Dataset, **kwargs) -> None: + def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs) -> None: """Gurobi solver interface class. Args: inputs (xr.Dataset): Calliope model data. + math (CalliopeMath): Calliope math. **kwargs: passed directly to the solver. """ if importlib.util.find_spec("gurobipy") is None: raise ImportError( "Install the `gurobipy` package to build the optimisation problem with the Gurobi backend." 
) - super().__init__(inputs, gurobipy.Model(), **kwargs) + super().__init__(inputs, math, gurobipy.Model(), **kwargs) self._instance: gurobipy.Model self.shadow_prices = GurobiShadowPrices(self) - self._add_all_inputs_as_parameters() - def add_parameter( # noqa: D102, override self, parameter_name: str, parameter_values: xr.DataArray, default: Any = np.nan ) -> None: @@ -275,7 +275,7 @@ def _solve( def verbose_strings(self) -> None: # noqa: D102, override def __renamer(val, *idx, name: str, attr: str): - if pd.notnull(val): + if pd.notna(val): new_obj_name = f"{name}[{', '.join(idx)}]" setattr(val, attr, new_obj_name) @@ -389,7 +389,7 @@ def update_variable_bounds( # noqa: D102, override ) continue - existing_bound_param = self.inputs.attrs["math"].get_key( + existing_bound_param = self.math.data.get_key( f"variables.{name}.bounds.{bound_name}", None ) if existing_bound_param in self.parameters: diff --git a/src/calliope/backend/latex_backend_model.py b/src/calliope/backend/latex_backend_model.py index 0256af5aa..c33229b0a 100644 --- a/src/calliope/backend/latex_backend_model.py +++ b/src/calliope/backend/latex_backend_model.py @@ -5,130 +5,21 @@ import logging import re import textwrap -import typing -from pathlib import Path -from typing import Any, Literal, overload +from typing import Any, Literal import jinja2 import numpy as np +import pandas as pd import xarray as xr from calliope.backend import backend_model, parsing from calliope.exceptions import ModelError +from calliope.preprocess import CalliopeMath -_ALLOWED_MATH_FILE_FORMATS = Literal["tex", "rst", "md"] - - +ALLOWED_MATH_FILE_FORMATS = Literal["tex", "rst", "md"] LOGGER = logging.getLogger(__name__) -class MathDocumentation: - """For math documentation.""" - - def __init__(self) -> None: - """Math documentation builder/writer. - - Args: - backend_builder (Callable): - Method to generate all optimisation problem components on a calliope.backend_model.BackendModel object. - """ - self._inputs: xr.Dataset - - def build(self, include: Literal["all", "valid"] = "all", **kwargs) -> None: - """Build string representations of the mathematical formulation using LaTeX math notation, ready to be written with `write`. - - Args: - include (Literal["all", "valid"], optional): - Defines whether to include all possible math equations ("all") or only - those for which at least one index item in the "where" string is valid - ("valid"). Defaults to "all". - **kwargs: kwargs for the LaTeX backend. - """ - backend = LatexBackendModel(self._inputs, include=include, **kwargs) - backend.add_all_math() - - self._instance = backend - - @property - def inputs(self): - """Getter for backend inputs.""" - return self._inputs - - @inputs.setter - def inputs(self, val: xr.Dataset): - """Setter for backend inputs.""" - self._inputs = val - - # Expecting string if not giving filename. - @overload - def write( - self, - filename: Literal[None] = None, - mkdocs_features: bool = False, - format: _ALLOWED_MATH_FILE_FORMATS | None = None, - ) -> str: ... - - # Expecting None (and format arg is not needed) if giving filename. - @overload - def write(self, filename: str | Path, mkdocs_features: bool = False) -> None: ... - - def write( - self, - filename: str | Path | None = None, - mkdocs_features: bool = False, - format: _ALLOWED_MATH_FILE_FORMATS | None = None, - ) -> str | None: - """Write model documentation. - - `build` must be run beforehand. 
- - Args: - filename (str | Path | None, optional): - If given, will write the built mathematical formulation to a file with - the given extension as the file format. Defaults to None. - mkdocs_features (bool, optional): - If True and Markdown docs are being generated, then: - - the equations will be on a tab and the original YAML math definition will be on another tab; - - the equation cross-references will be given in a drop-down list. - Defaults to False. - format (_ALLOWED_MATH_FILE_FORMATS | None, optional): - Not required if filename is given (as the format will be automatically inferred). - Required if expecting a string return from calling this function. The LaTeX math will be embedded in a document of the given format (tex=LaTeX, rst=reStructuredText, md=Markdown). - Defaults to None. - - Raises: - exceptions.ModelError: Math strings need to be built first (`build`) - ValueError: The file format (inferred automatically from `filename` or given by `format`) must be one of ["tex", "rst", "md"]. - - Returns: - str | None: - If `filename` is None, the built mathematical formulation documentation will be returned as a string. - """ - if not hasattr(self, "_instance"): - raise ModelError( - "Build the documentation (`build`) before trying to write it" - ) - - if format is None and filename is not None: - format = Path(filename).suffix.removeprefix(".") # type: ignore - LOGGER.info( - f"Inferring math documentation format from filename as `{format}`." - ) - - allowed_formats = typing.get_args(_ALLOWED_MATH_FILE_FORMATS) - if format is None or format not in allowed_formats: - raise ValueError( - f"Math documentation format must be one of {allowed_formats}, received `{format}`" - ) - populated_doc = self._instance.generate_math_doc(format, mkdocs_features) - - if filename is None: - return populated_doc - else: - Path(filename).write_text(populated_doc) - return None - - class LatexBackendModel(backend_model.BackendModelGenerator): """Calliope's LaTeX backend.""" @@ -224,6 +115,10 @@ class LatexBackendModel(backend_model.BackendModelGenerator): **Default**: {{ equation.default }} {% endif %} + {% if equation.type is not none %} + + **Type**: {{ equation.type }} + {% endif %} {% if equation.expression != "" %} .. container:: scrolling-wrapper @@ -299,6 +194,10 @@ class LatexBackendModel(backend_model.BackendModelGenerator): \textbf{Default}: {{ equation.default }} {% endif %} + {% if equation.type is not none %} + + \textbf{Type}: {{ equation.type }} + {% endif %} {% if equation.expression != "" %} \begin{equation} @@ -371,6 +270,10 @@ class LatexBackendModel(backend_model.BackendModelGenerator): **Default**: {{ equation.default }} {% endif %} + {% if equation.type is not none %} + + **Type**: {{ equation.type }} + {% endif %} {% if equation.expression != "" %} {% if mkdocs_features and yaml_snippet is not none%} @@ -399,21 +302,24 @@ class LatexBackendModel(backend_model.BackendModelGenerator): FORMAT_STRINGS = {"rst": RST_DOC, "tex": TEX_DOC, "md": MD_DOC} def __init__( - self, inputs: xr.Dataset, include: Literal["all", "valid"] = "all", **kwargs + self, + inputs: xr.Dataset, + math: CalliopeMath, + include: Literal["all", "valid"] = "all", + **kwargs, ) -> None: """Interface to build a string representation of the mathematical formulation using LaTeX math notation. Args: inputs (xr.Dataset): model data. + math (CalliopeMath): Calliope math. 
include (Literal["all", "valid"], optional): Defines whether to include all possible math equations ("all") or only those for which at least one index item in the "where" string is valid ("valid"). Defaults to "all". **kwargs: for the backend model generator. """ - super().__init__(inputs, **kwargs) + super().__init__(inputs, math, **kwargs) self.include = include - self._add_all_inputs_as_parameters() - def add_parameter( # noqa: D102, override self, parameter_name: str, parameter_values: xr.DataArray, default: Any = np.nan ) -> None: @@ -421,14 +327,17 @@ def add_parameter( # noqa: D102, override "title": self._PARAM_TITLES.get(parameter_name, None), "description": self._PARAM_DESCRIPTIONS.get(parameter_name, None), "unit": self._PARAM_UNITS.get(parameter_name, None), + "type": self._PARAM_TYPE.get(parameter_name, None), "math_repr": rf"\textit{{{parameter_name}}}" + self._dims_to_var_string(parameter_values), } + if pd.notna(default): + attrs["default"] = default self._add_to_dataset(parameter_name, parameter_values, "parameters", attrs) def add_constraint( # noqa: D102, override - self, name: str, constraint_dict: parsing.UnparsedConstraint | None = None + self, name: str, constraint_dict: parsing.UnparsedConstraint ) -> None: equation_strings: list = [] @@ -488,7 +397,7 @@ def _constraint_setter(where: xr.DataArray, references: set) -> xr.DataArray: ) def add_global_expression( # noqa: D102, override - self, name: str, expression_dict: parsing.UnparsedExpression | None = None + self, name: str, expression_dict: parsing.UnparsedExpression ) -> None: equation_strings: list = [] @@ -515,7 +424,7 @@ def _expression_setter( ) def add_variable( # noqa: D102, override - self, name: str, variable_dict: parsing.UnparsedVariable | None = None + self, name: str, variable_dict: parsing.UnparsedVariable ) -> None: domain_dict = {"real": r"\mathbb{R}\;", "integer": r"\mathbb{Z}\;"} bound_refs: set = set() @@ -523,8 +432,6 @@ def add_variable( # noqa: D102, override def _variable_setter(where: xr.DataArray, references: set) -> xr.DataArray: return where.where(where) - domain = domain_dict[variable_dict.get("domain", "real")] - parsed_component = self._add_component( name, variable_dict, _variable_setter, "variables", break_early=False ) @@ -544,7 +451,7 @@ def _variable_setter(where: xr.DataArray, references: set) -> xr.DataArray: ) def add_objective( # noqa: D102, override - self, name: str, objective_dict: parsing.UnparsedObjective | None = None + self, name: str, objective_dict: parsing.UnparsedObjective ) -> None: sense_dict = { "minimize": r"\min{}", @@ -552,7 +459,6 @@ def add_objective( # noqa: D102, override "minimise": r"\min{}", "maximise": r"\max{}", } - equation_strings: list = [] def _objective_setter( @@ -583,7 +489,7 @@ def delete_component( # noqa: D102, override del self._dataset[key] def generate_math_doc( - self, format: _ALLOWED_MATH_FILE_FORMATS = "tex", mkdocs_features: bool = False + self, format: ALLOWED_MATH_FILE_FORMATS = "tex", mkdocs_features: bool = False ) -> str: """Generate the math documentation by embedding LaTeX math in a template. 
@@ -623,6 +529,7 @@ def generate_math_doc( ), "uses": sorted(list(uses[name] - set([name]))), "default": da.attrs.get("default", None), + "type": da.attrs.get("type", None), "unit": da.attrs.get("unit", None), "yaml_snippet": da.attrs.get("yaml_snippet", None), } @@ -640,7 +547,7 @@ def generate_math_doc( ] if getattr(self, objtype).data_vars } - if not components["parameters"]: + if "parameters" in components and not components["parameters"]: del components["parameters"] return self._render( doc_template, mkdocs_features=mkdocs_features, components=components diff --git a/src/calliope/backend/parsing.py b/src/calliope/backend/parsing.py index 278f037ac..33c9ea477 100644 --- a/src/calliope/backend/parsing.py +++ b/src/calliope/backend/parsing.py @@ -850,13 +850,13 @@ def generate_top_level_where_array( return where def raise_caught_errors(self): - """If there are any parsing errors, pipe them to the ModelError bullet point list generator.""" + """Pipe parsing errors to the ModelError bullet point list generator.""" if not self._is_valid: exceptions.print_warnings_and_raise_errors( errors={f"{self.name}": self._errors}, during=( "math string parsing (marker indicates where parsing stopped, " - "which might not be the root cause of the issue; sorry...)" + "but may not point to the root cause of the issue)" ), bullet=self._ERR_BULLET, ) diff --git a/src/calliope/backend/pyomo_backend_model.py b/src/calliope/backend/pyomo_backend_model.py index 228b085a0..5ba41ba0a 100644 --- a/src/calliope/backend/pyomo_backend_model.py +++ b/src/calliope/backend/pyomo_backend_model.py @@ -26,11 +26,13 @@ from pyomo.opt import SolverFactory # type: ignore from pyomo.util.model_size import build_model_size_report # type: ignore -from calliope.backend import backend_model, parsing from calliope.exceptions import BackendError, BackendWarning from calliope.exceptions import warn as model_warn +from calliope.preprocess import CalliopeMath from calliope.util.logging import LogWriter +from . import backend_model, parsing + T = TypeVar("T") _COMPONENTS_T = Literal[ "variables", @@ -56,14 +58,15 @@ class PyomoBackendModel(backend_model.BackendModel): """Pyomo-specific backend functionality.""" - def __init__(self, inputs: xr.Dataset, **kwargs) -> None: + def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs) -> None: """Pyomo solver interface class. Args: inputs (xr.Dataset): Calliope model data. + math (CalliopeMath): Calliope math. **kwargs: passed directly to the solver. 
""" - super().__init__(inputs, pmo.block(), **kwargs) + super().__init__(inputs, math, pmo.block(), **kwargs) self._instance.parameters = pmo.parameter_dict() self._instance.variables = pmo.variable_dict() @@ -75,8 +78,6 @@ def __init__(self, inputs: xr.Dataset, **kwargs) -> None: self._instance.dual = pmo.suffix(direction=pmo.suffix.IMPORT) self.shadow_prices = PyomoShadowPrices(self._instance.dual, self) - self._add_all_inputs_as_parameters() - def add_parameter( # noqa: D102, override self, parameter_name: str, parameter_values: xr.DataArray, default: Any = np.nan ) -> None: @@ -330,7 +331,7 @@ def _solve( # noqa: D102, override def verbose_strings(self) -> None: # noqa: D102, override def __renamer(val, *idx): - if pd.notnull(val): + if pd.notna(val): val.calliope_coords = idx with self._datetime_as_string(self._dataset): @@ -460,7 +461,7 @@ def update_variable_bounds( # noqa: D102, override ) continue - existing_bound_param = self.inputs.attrs["math"].get_key( + existing_bound_param = self.math.data.get_key( f"variables.{name}.bounds.{bound_name}", None ) if existing_bound_param in self.parameters: diff --git a/src/calliope/config/config_schema.yaml b/src/calliope/config/config_schema.yaml index e9797429f..b9ebe6277 100644 --- a/src/calliope/config/config_schema.yaml +++ b/src/calliope/config/config_schema.yaml @@ -51,17 +51,6 @@ properties: type: string default: "ISO8601" description: Timestamp format of all time series data when read from file. "ISO8601" means "%Y-%m-%d %H:%M:%S". - add_math: - type: array - default: [] - description: List of references to files which contain additional mathematical formulations to be applied on top of the base math. - uniqueItems: true - items: - type: string - description: > - If referring to an pre-defined Calliope math file (see documentation for available files), do not append the reference with ".yaml". - If referring to your own math file, ensure the file type is given as a suffix (".yaml" or ".yml"). - Relative paths will be assumed to be relative to the model definition file given when creating a calliope Model (`calliope.Model(model_definition=...)`) distance_unit: type: string default: km @@ -77,6 +66,23 @@ properties: Additional configuration items will be passed onto math string parsing and can therefore be accessed in the `where` strings by `config.[item-name]`, where "[item-name]" is the name of your own configuration item. additionalProperties: true properties: + add_math: + type: array + default: [] + description: List of references to files which contain additional mathematical formulations to be applied on top of or instead of the base mode math. + uniqueItems: true + items: + type: string + description: > + If referring to an pre-defined Calliope math file (see documentation for available files), do not append the reference with ".yaml". + If referring to your own math file, ensure the file type is given as a suffix (".yaml" or ".yml"). + Relative paths will be assumed to be relative to the model definition file given when creating a calliope Model (`calliope.Model(model_definition=...)`). + ignore_mode_math: + type: boolean + default: false + description: >- + If True, do not initialise the mathematical formulation with the pre-defined math for the given run `mode`. + This option can be used to completely re-define the Calliope mathematical formulation. 
backend: type: string default: pyomo @@ -111,6 +117,12 @@ properties: type: boolean default: false description: If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run. + pre_validate_math_strings: + type: boolean + default: true + description: >- + If true, the Calliope math definition will be scanned for parsing errors _before_ undertaking the much more expensive operation of building the optimisation problem. + You can switch this off (e.g., if you know there are no parsing errors) to reduce overall build time. solve: type: object @@ -179,7 +191,7 @@ properties: patternProperties: '^[^_^\d][\w]*$': {} - data_sources: + data_tables: type: [object, "null"] description: >- Reference to files from which to load parts (or all) of the model definition. diff --git a/src/calliope/config/data_source_schema.yaml b/src/calliope/config/data_table_schema.yaml similarity index 92% rename from src/calliope/config/data_source_schema.yaml rename to src/calliope/config/data_table_schema.yaml index c11f025c5..cf6471249 100644 --- a/src/calliope/config/data_source_schema.yaml +++ b/src/calliope/config/data_table_schema.yaml @@ -2,13 +2,13 @@ # yaml-language-server: $schema=https://json-schema.org/draft/2020-12/schema# $schema: https://json-schema.org/draft/2020-12/schema# -title: Data source schema +title: Data table schema description: All options available to load model definition data from file. type: object additionalProperties: false -required: ["source"] +required: ["data"] $defs: - DataSourceVals: + DataTableVals: oneOf: - type: "null" - type: string @@ -25,14 +25,14 @@ properties: Names of dimensions defined row-wise. Each name should correspond to a column in your data that contains index items. These columns must be to the left of the columns containing your data. - $ref: "#/$defs/DataSourceVals" + $ref: "#/$defs/DataTableVals" columns: description: >- Names of dimensions defined column-wise. Each name should correspond to a row in your data that contains index items. These rows must be above the rows containing your data. - $ref: "#/$defs/DataSourceVals" - source: + $ref: "#/$defs/DataTableVals" + data: description: >- Relative or absolute filepath. If relative, will be relative to the model config file used to initialise the model. @@ -52,7 +52,7 @@ properties: type: [string, boolean, number] drop: type: [string, array] - $ref: "#/$defs/DataSourceVals" + $ref: "#/$defs/DataTableVals" description: >- Dimensions in the rows and/or columns that contain metadata and should therefore not be passed on to the loaded model dataset. These could include comments on the source of the data, the data license, or the parameter units. @@ -70,4 +70,4 @@ properties: '^[^_^\d][\w]*$': type: [string, array] description: Keys are dimension names (must not be in `rows` or `columns`), values are index items of that dimension to add. - $ref: "#/$defs/DataSourceVals" \ No newline at end of file + $ref: "#/$defs/DataTableVals" diff --git a/src/calliope/config/protected_parameters.yaml b/src/calliope/config/protected_parameters.yaml index 8ed48d944..6d0efbd39 100644 --- a/src/calliope/config/protected_parameters.yaml +++ b/src/calliope/config/protected_parameters.yaml @@ -1,4 +1,4 @@ -# Parameters for which loading from file via `data_sources` is prohibited +# Parameters for which loading from file via `data_tables` is prohibited active: >- Technology/Node activation (`active`) can only be used in the YAML model definition. 
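As a Python counterpart to the schema above, a minimal sketch of defining a data table with the renamed keys (the model path, file path, and dimension names are illustrative):

```python
import calliope

# `data_tables` (formerly `data_sources`) with the required `data` entry
# (formerly `source`). Paths and dimension names are illustrative,
# mirroring the schema above.
model = calliope.Model(
    "path/to/model.yaml",
    override_dict={
        "data_tables": {
            "tech_data": {
                "data": "data_tables/tech_data.csv",
                "rows": ["techs", "parameters"],
            }
        }
    },
)
```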
diff --git a/src/calliope/example_models/national_scale/data_sources/cluster_days.csv b/src/calliope/example_models/national_scale/data_sources/cluster_days.csv deleted file mode 100644 index 4cd9e78da..000000000 --- a/src/calliope/example_models/national_scale/data_sources/cluster_days.csv +++ /dev/null @@ -1,366 +0,0 @@ -datesteps,cluster -2005-01-01,2005-12-09 -2005-01-02,2005-12-08 -2005-01-03,2005-12-08 -2005-01-04,2005-12-09 -2005-01-05,2005-12-09 -2005-01-06,2005-12-06 -2005-01-07,2005-12-06 -2005-01-08,2005-12-06 -2005-01-09,2005-12-06 -2005-01-10,2005-12-06 -2005-01-11,2005-12-06 -2005-01-12,2005-12-06 -2005-01-13,2005-12-06 -2005-01-14,2005-12-09 -2005-01-15,2005-12-09 -2005-01-16,2005-12-08 -2005-01-17,2005-12-08 -2005-01-18,2005-12-06 -2005-01-19,2005-12-06 -2005-01-20,2005-12-06 -2005-01-21,2005-12-08 -2005-01-22,2005-12-08 -2005-01-23,2005-12-08 -2005-01-24,2005-12-06 -2005-01-25,2005-12-08 -2005-01-26,2005-12-06 -2005-01-27,2005-12-06 -2005-01-28,2005-12-06 -2005-01-29,2005-12-06 -2005-01-30,2005-12-08 -2005-01-31,2005-02-17 -2005-02-01,2005-12-08 -2005-02-02,2005-12-06 -2005-02-03,2005-02-17 -2005-02-04,2005-12-06 -2005-02-05,2005-12-09 -2005-02-06,2005-12-09 -2005-02-07,2005-02-17 -2005-02-08,2005-12-06 -2005-02-09,2005-12-06 -2005-02-10,2005-12-06 -2005-02-11,2005-12-06 -2005-02-12,2005-12-06 -2005-02-13,2005-12-06 -2005-02-14,2005-12-08 -2005-02-15,2005-02-17 -2005-02-16,2005-12-08 -2005-02-17,2005-12-06 -2005-02-18,2005-12-08 -2005-02-19,2005-12-06 -2005-02-20,2005-12-06 -2005-02-21,2005-02-17 -2005-02-22,2005-12-06 -2005-02-23,2005-12-09 -2005-02-24,2005-02-17 -2005-02-25,2005-09-13 -2005-02-26,2005-04-13 -2005-02-27,2005-12-08 -2005-02-28,2005-12-08 -2005-03-01,2005-12-08 -2005-03-02,2005-12-09 -2005-03-03,2005-02-17 -2005-03-04,2005-09-13 -2005-03-05,2005-12-08 -2005-03-06,2005-02-17 -2005-03-07,2005-04-13 -2005-03-08,2005-04-13 -2005-03-09,2005-04-13 -2005-03-10,2005-04-13 -2005-03-11,2005-04-13 -2005-03-12,2005-04-27 -2005-03-13,2005-04-27 -2005-03-14,2005-12-06 -2005-03-15,2005-09-13 -2005-03-16,2005-09-13 -2005-03-17,2005-09-13 -2005-03-18,2005-09-13 -2005-03-19,2005-04-27 -2005-03-20,2005-04-13 -2005-03-21,2005-04-27 -2005-03-22,2005-04-13 -2005-03-23,2005-04-27 -2005-03-24,2005-09-13 -2005-03-25,2005-09-13 -2005-03-26,2005-04-13 -2005-03-27,2005-04-13 -2005-03-28,2005-12-06 -2005-03-29,2005-04-13 -2005-03-30,2005-04-13 -2005-03-31,2005-04-13 -2005-04-01,2005-04-13 -2005-04-02,2005-12-09 -2005-04-03,2005-12-08 -2005-04-04,2005-09-13 -2005-04-05,2005-09-13 -2005-04-06,2005-09-13 -2005-04-07,2005-04-13 -2005-04-08,2005-12-09 -2005-04-09,2005-08-09 -2005-04-10,2005-08-09 -2005-04-11,2005-08-09 -2005-04-12,2005-08-09 -2005-04-13,2005-08-09 -2005-04-14,2005-04-13 -2005-04-15,2005-04-13 -2005-04-16,2005-09-13 -2005-04-17,2005-09-09 -2005-04-18,2005-05-25 -2005-04-19,2005-08-09 -2005-04-20,2005-08-09 -2005-04-21,2005-09-09 -2005-04-22,2005-09-09 -2005-04-23,2005-09-09 -2005-04-24,2005-09-13 -2005-04-25,2005-08-09 -2005-04-26,2005-08-09 -2005-04-27,2005-08-09 -2005-04-28,2005-08-09 -2005-04-29,2005-09-09 -2005-04-30,2005-09-09 -2005-05-01,2005-09-09 -2005-05-02,2005-09-09 -2005-05-03,2005-09-09 -2005-05-04,2005-08-09 -2005-05-05,2005-08-09 -2005-05-06,2005-09-09 -2005-05-07,2005-08-09 -2005-05-08,2005-09-09 -2005-05-09,2005-09-13 -2005-05-10,2005-08-09 -2005-05-11,2005-04-27 -2005-05-12,2005-04-27 -2005-05-13,2005-04-27 -2005-05-14,2005-09-09 -2005-05-15,2005-08-09 -2005-05-16,2005-04-27 -2005-05-17,2005-08-09 -2005-05-18,2005-08-09 -2005-05-19,2005-08-09 
-2005-05-20,2005-09-09 -2005-05-21,2005-05-25 -2005-05-22,2005-08-09 -2005-05-23,2005-09-09 -2005-05-24,2005-08-09 -2005-05-25,2005-09-09 -2005-05-26,2005-09-09 -2005-05-27,2005-09-09 -2005-05-28,2005-09-09 -2005-05-29,2005-04-27 -2005-05-30,2005-04-27 -2005-05-31,2005-12-09 -2005-06-01,2005-09-09 -2005-06-02,2005-09-09 -2005-06-03,2005-08-09 -2005-06-04,2005-08-09 -2005-06-05,2005-08-09 -2005-06-06,2005-08-09 -2005-06-07,2005-08-09 -2005-06-08,2005-08-09 -2005-06-09,2005-08-09 -2005-06-10,2005-08-09 -2005-06-11,2005-09-09 -2005-06-12,2005-08-09 -2005-06-13,2005-09-13 -2005-06-14,2005-08-09 -2005-06-15,2005-05-25 -2005-06-16,2005-08-09 -2005-06-17,2005-08-09 -2005-06-18,2005-08-09 -2005-06-19,2005-08-09 -2005-06-20,2005-08-09 -2005-06-21,2005-08-09 -2005-06-22,2005-09-13 -2005-06-23,2005-04-27 -2005-06-24,2005-08-09 -2005-06-25,2005-08-09 -2005-06-26,2005-08-09 -2005-06-27,2005-04-27 -2005-06-28,2005-08-09 -2005-06-29,2005-09-09 -2005-06-30,2005-08-09 -2005-07-01,2005-08-09 -2005-07-02,2005-08-09 -2005-07-03,2005-08-09 -2005-07-04,2005-09-09 -2005-07-05,2005-09-09 -2005-07-06,2005-08-09 -2005-07-07,2005-08-09 -2005-07-08,2005-08-09 -2005-07-09,2005-08-09 -2005-07-10,2005-08-09 -2005-07-11,2005-08-09 -2005-07-12,2005-08-09 -2005-07-13,2005-08-09 -2005-07-14,2005-08-09 -2005-07-15,2005-09-09 -2005-07-16,2005-08-09 -2005-07-17,2005-08-09 -2005-07-18,2005-08-09 -2005-07-19,2005-08-09 -2005-07-20,2005-08-09 -2005-07-21,2005-08-09 -2005-07-22,2005-08-09 -2005-07-23,2005-08-09 -2005-07-24,2005-08-09 -2005-07-25,2005-08-09 -2005-07-26,2005-08-09 -2005-07-27,2005-09-13 -2005-07-28,2005-09-13 -2005-07-29,2005-08-09 -2005-07-30,2005-08-09 -2005-07-31,2005-08-09 -2005-08-01,2005-08-09 -2005-08-02,2005-09-09 -2005-08-03,2005-08-09 -2005-08-04,2005-08-09 -2005-08-05,2005-09-09 -2005-08-06,2005-09-09 -2005-08-07,2005-09-09 -2005-08-08,2005-09-13 -2005-08-09,2005-09-13 -2005-08-10,2005-04-27 -2005-08-11,2005-05-25 -2005-08-12,2005-08-09 -2005-08-13,2005-08-09 -2005-08-14,2005-08-09 -2005-08-15,2005-08-09 -2005-08-16,2005-09-13 -2005-08-17,2005-08-09 -2005-08-18,2005-08-09 -2005-08-19,2005-04-27 -2005-08-20,2005-08-09 -2005-08-21,2005-08-09 -2005-08-22,2005-08-09 -2005-08-23,2005-08-09 -2005-08-24,2005-08-09 -2005-08-25,2005-09-13 -2005-08-26,2005-08-09 -2005-08-27,2005-08-09 -2005-08-28,2005-05-25 -2005-08-29,2005-09-13 -2005-08-30,2005-08-09 -2005-08-31,2005-09-13 -2005-09-01,2005-08-09 -2005-09-02,2005-08-09 -2005-09-03,2005-05-25 -2005-09-04,2005-09-13 -2005-09-05,2005-09-13 -2005-09-06,2005-09-13 -2005-09-07,2005-04-27 -2005-09-08,2005-09-13 -2005-09-09,2005-05-25 -2005-09-10,2005-08-09 -2005-09-11,2005-09-13 -2005-09-12,2005-09-13 -2005-09-13,2005-09-13 -2005-09-14,2005-08-09 -2005-09-15,2005-09-13 -2005-09-16,2005-04-13 -2005-09-17,2005-04-27 -2005-09-18,2005-09-13 -2005-09-19,2005-09-13 -2005-09-20,2005-09-13 -2005-09-21,2005-09-13 -2005-09-22,2005-09-13 -2005-09-23,2005-12-09 -2005-09-24,2005-09-09 -2005-09-25,2005-09-13 -2005-09-26,2005-05-25 -2005-09-27,2005-05-25 -2005-09-28,2005-05-25 -2005-09-29,2005-09-13 -2005-09-30,2005-05-25 -2005-10-01,2005-05-25 -2005-10-02,2005-04-13 -2005-10-03,2005-09-13 -2005-10-04,2005-09-13 -2005-10-05,2005-09-13 -2005-10-06,2005-09-13 -2005-10-07,2005-09-13 -2005-10-08,2005-04-27 -2005-10-09,2005-04-27 -2005-10-10,2005-12-09 -2005-10-11,2005-12-09 -2005-10-12,2005-04-27 -2005-10-13,2005-04-27 -2005-10-14,2005-12-06 -2005-10-15,2005-09-13 -2005-10-16,2005-12-08 -2005-10-17,2005-02-17 -2005-10-18,2005-04-27 -2005-10-19,2005-09-13 -2005-10-20,2005-04-27 
-2005-10-21,2005-05-25 -2005-10-22,2005-04-27 -2005-10-23,2005-09-13 -2005-10-24,2005-09-13 -2005-10-25,2005-09-13 -2005-10-26,2005-09-13 -2005-10-27,2005-09-13 -2005-10-28,2005-04-27 -2005-10-29,2005-09-13 -2005-10-30,2005-12-06 -2005-10-31,2005-09-13 -2005-11-01,2005-04-27 -2005-11-02,2005-12-08 -2005-11-03,2005-12-09 -2005-11-04,2005-12-08 -2005-11-05,2005-12-08 -2005-11-06,2005-12-08 -2005-11-07,2005-12-06 -2005-11-08,2005-09-13 -2005-11-09,2005-09-13 -2005-11-10,2005-04-27 -2005-11-11,2005-04-27 -2005-11-12,2005-04-27 -2005-11-13,2005-12-09 -2005-11-14,2005-04-27 -2005-11-15,2005-04-27 -2005-11-16,2005-12-06 -2005-11-17,2005-12-09 -2005-11-18,2005-12-09 -2005-11-19,2005-12-09 -2005-11-20,2005-12-09 -2005-11-21,2005-12-09 -2005-11-22,2005-12-06 -2005-11-23,2005-12-08 -2005-11-24,2005-12-08 -2005-11-25,2005-02-17 -2005-11-26,2005-12-08 -2005-11-27,2005-12-08 -2005-11-28,2005-12-06 -2005-11-29,2005-12-09 -2005-11-30,2005-12-06 -2005-12-01,2005-12-06 -2005-12-02,2005-12-09 -2005-12-03,2005-12-06 -2005-12-04,2005-12-06 -2005-12-05,2005-12-06 -2005-12-06,2005-12-06 -2005-12-07,2005-12-06 -2005-12-08,2005-12-06 -2005-12-09,2005-12-06 -2005-12-10,2005-12-06 -2005-12-11,2005-12-06 -2005-12-12,2005-12-06 -2005-12-13,2005-12-06 -2005-12-14,2005-12-06 -2005-12-15,2005-12-08 -2005-12-16,2005-12-08 -2005-12-17,2005-12-06 -2005-12-18,2005-12-08 -2005-12-19,2005-12-09 -2005-12-20,2005-12-09 -2005-12-21,2005-12-06 -2005-12-22,2005-12-06 -2005-12-23,2005-12-09 -2005-12-24,2005-12-09 -2005-12-25,2005-12-09 -2005-12-26,2005-12-09 -2005-12-27,2005-12-09 -2005-12-28,2005-12-09 -2005-12-29,2005-12-06 -2005-12-30,2005-12-09 -2005-12-31,2005-12-09 diff --git a/src/calliope/example_models/national_scale/data_tables/cluster_days.csv b/src/calliope/example_models/national_scale/data_tables/cluster_days.csv new file mode 100644 index 000000000..587e3dd87 --- /dev/null +++ b/src/calliope/example_models/national_scale/data_tables/cluster_days.csv @@ -0,0 +1,366 @@ +timesteps,cluster +2005-01-01,2005-11-04 +2005-01-02,2005-11-04 +2005-01-03,2005-12-07 +2005-01-04,2005-12-07 +2005-01-05,2005-12-07 +2005-01-06,2005-12-07 +2005-01-07,2005-12-07 +2005-01-08,2005-11-01 +2005-01-09,2005-12-07 +2005-01-10,2005-12-07 +2005-01-11,2005-12-07 +2005-01-12,2005-12-07 +2005-01-13,2005-12-07 +2005-01-14,2005-12-07 +2005-01-15,2005-11-04 +2005-01-16,2005-12-07 +2005-01-17,2005-12-07 +2005-01-18,2005-12-07 +2005-01-19,2005-12-07 +2005-01-20,2005-12-07 +2005-01-21,2005-11-04 +2005-01-22,2005-11-04 +2005-01-23,2005-12-07 +2005-01-24,2005-12-07 +2005-01-25,2005-12-07 +2005-01-26,2005-12-07 +2005-01-27,2005-12-07 +2005-01-28,2005-12-07 +2005-01-29,2005-11-01 +2005-01-30,2005-12-07 +2005-01-31,2005-12-07 +2005-02-01,2005-12-07 +2005-02-02,2005-12-07 +2005-02-03,2005-12-07 +2005-02-04,2005-12-07 +2005-02-05,2005-11-04 +2005-02-06,2005-12-07 +2005-02-07,2005-12-07 +2005-02-08,2005-12-07 +2005-02-09,2005-12-07 +2005-02-10,2005-12-07 +2005-02-11,2005-11-01 +2005-02-12,2005-11-01 +2005-02-13,2005-12-07 +2005-02-14,2005-12-07 +2005-02-15,2005-12-07 +2005-02-16,2005-12-07 +2005-02-17,2005-12-07 +2005-02-18,2005-11-04 +2005-02-19,2005-11-01 +2005-02-20,2005-12-07 +2005-02-21,2005-12-07 +2005-02-22,2005-12-07 +2005-02-23,2005-12-07 +2005-02-24,2005-12-07 +2005-02-25,2005-11-01 +2005-02-26,2005-11-04 +2005-02-27,2005-12-07 +2005-02-28,2005-12-07 +2005-03-01,2005-12-07 +2005-03-02,2005-12-07 +2005-03-03,2005-12-07 +2005-03-04,2005-11-01 +2005-03-05,2005-11-04 +2005-03-06,2005-12-07 +2005-03-07,2005-03-09 +2005-03-08,2005-03-09 
+2005-03-09,2005-03-09 +2005-03-10,2005-03-09 +2005-03-11,2005-03-09 +2005-03-12,2005-11-01 +2005-03-13,2005-11-01 +2005-03-14,2005-03-09 +2005-03-15,2005-03-09 +2005-03-16,2005-03-09 +2005-03-17,2005-03-09 +2005-03-18,2005-09-19 +2005-03-19,2005-11-01 +2005-03-20,2005-03-09 +2005-03-21,2005-11-01 +2005-03-22,2005-03-09 +2005-03-23,2005-11-01 +2005-03-24,2005-09-19 +2005-03-25,2005-11-01 +2005-03-26,2005-11-04 +2005-03-27,2005-03-09 +2005-03-28,2005-03-09 +2005-03-29,2005-03-09 +2005-03-30,2005-03-09 +2005-03-31,2005-03-09 +2005-04-01,2005-11-04 +2005-04-02,2005-11-04 +2005-04-03,2005-11-04 +2005-04-04,2005-09-19 +2005-04-05,2005-09-19 +2005-04-06,2005-09-19 +2005-04-07,2005-03-09 +2005-04-08,2005-11-04 +2005-04-09,2005-09-19 +2005-04-10,2005-09-19 +2005-04-11,2005-09-19 +2005-04-12,2005-09-19 +2005-04-13,2005-09-19 +2005-04-14,2005-11-04 +2005-04-15,2005-11-04 +2005-04-16,2005-09-19 +2005-04-17,2005-05-02 +2005-04-18,2005-09-28 +2005-04-19,2005-09-19 +2005-04-20,2005-09-19 +2005-04-21,2005-05-02 +2005-04-22,2005-05-02 +2005-04-23,2005-05-02 +2005-04-24,2005-09-19 +2005-04-25,2005-09-19 +2005-04-26,2005-09-19 +2005-04-27,2005-09-19 +2005-04-28,2005-09-19 +2005-04-29,2005-05-02 +2005-04-30,2005-05-02 +2005-05-01,2005-05-02 +2005-05-02,2005-05-02 +2005-05-03,2005-05-02 +2005-05-04,2005-09-19 +2005-05-05,2005-09-19 +2005-05-06,2005-05-02 +2005-05-07,2005-09-19 +2005-05-08,2005-05-02 +2005-05-09,2005-05-13 +2005-05-10,2005-05-13 +2005-05-11,2005-05-13 +2005-05-12,2005-05-13 +2005-05-13,2005-05-13 +2005-05-14,2005-05-02 +2005-05-15,2005-09-19 +2005-05-16,2005-05-13 +2005-05-17,2005-09-19 +2005-05-18,2005-09-19 +2005-05-19,2005-09-19 +2005-05-20,2005-05-02 +2005-05-21,2005-09-28 +2005-05-22,2005-09-19 +2005-05-23,2005-05-02 +2005-05-24,2005-09-19 +2005-05-25,2005-05-02 +2005-05-26,2005-05-02 +2005-05-27,2005-05-02 +2005-05-28,2005-05-02 +2005-05-29,2005-05-13 +2005-05-30,2005-05-13 +2005-05-31,2005-11-04 +2005-06-01,2005-08-17 +2005-06-02,2005-08-17 +2005-06-03,2005-08-17 +2005-06-04,2005-08-17 +2005-06-05,2005-08-17 +2005-06-06,2005-08-17 +2005-06-07,2005-08-17 +2005-06-08,2005-08-17 +2005-06-09,2005-08-17 +2005-06-10,2005-08-17 +2005-06-11,2005-08-17 +2005-06-12,2005-06-13 +2005-06-13,2005-06-13 +2005-06-14,2005-08-17 +2005-06-15,2005-08-17 +2005-06-16,2005-08-17 +2005-06-17,2005-08-17 +2005-06-18,2005-08-17 +2005-06-19,2005-08-17 +2005-06-20,2005-08-17 +2005-06-21,2005-06-13 +2005-06-22,2005-06-13 +2005-06-23,2005-06-13 +2005-06-24,2005-08-17 +2005-06-25,2005-08-17 +2005-06-26,2005-08-17 +2005-06-27,2005-06-13 +2005-06-28,2005-08-17 +2005-06-29,2005-08-17 +2005-06-30,2005-08-17 +2005-07-01,2005-08-17 +2005-07-02,2005-08-17 +2005-07-03,2005-08-17 +2005-07-04,2005-08-17 +2005-07-05,2005-08-17 +2005-07-06,2005-08-17 +2005-07-07,2005-08-17 +2005-07-08,2005-08-17 +2005-07-09,2005-08-17 +2005-07-10,2005-08-17 +2005-07-11,2005-08-17 +2005-07-12,2005-08-17 +2005-07-13,2005-08-17 +2005-07-14,2005-08-17 +2005-07-15,2005-08-17 +2005-07-16,2005-08-17 +2005-07-17,2005-08-17 +2005-07-18,2005-08-17 +2005-07-19,2005-08-17 +2005-07-20,2005-08-17 +2005-07-21,2005-08-17 +2005-07-22,2005-08-17 +2005-07-23,2005-08-17 +2005-07-24,2005-08-17 +2005-07-25,2005-08-17 +2005-07-26,2005-08-17 +2005-07-27,2005-06-13 +2005-07-28,2005-06-13 +2005-07-29,2005-08-17 +2005-07-30,2005-08-17 +2005-07-31,2005-08-17 +2005-08-01,2005-08-17 +2005-08-02,2005-08-17 +2005-08-03,2005-08-17 +2005-08-04,2005-08-17 +2005-08-05,2005-08-17 +2005-08-06,2005-08-17 +2005-08-07,2005-08-17 +2005-08-08,2005-06-13 +2005-08-09,2005-06-13 
+2005-08-10,2005-06-13 +2005-08-11,2005-06-13 +2005-08-12,2005-08-17 +2005-08-13,2005-08-17 +2005-08-14,2005-08-17 +2005-08-15,2005-08-17 +2005-08-16,2005-06-13 +2005-08-17,2005-08-17 +2005-08-18,2005-08-17 +2005-08-19,2005-06-13 +2005-08-20,2005-08-17 +2005-08-21,2005-08-17 +2005-08-22,2005-08-17 +2005-08-23,2005-08-17 +2005-08-24,2005-08-17 +2005-08-25,2005-06-13 +2005-08-26,2005-08-17 +2005-08-27,2005-08-17 +2005-08-28,2005-08-17 +2005-08-29,2005-08-17 +2005-08-30,2005-08-17 +2005-08-31,2005-06-13 +2005-09-01,2005-09-19 +2005-09-02,2005-09-19 +2005-09-03,2005-09-28 +2005-09-04,2005-09-19 +2005-09-05,2005-09-19 +2005-09-06,2005-09-19 +2005-09-07,2005-11-01 +2005-09-08,2005-09-19 +2005-09-09,2005-09-28 +2005-09-10,2005-09-19 +2005-09-11,2005-09-19 +2005-09-12,2005-09-19 +2005-09-13,2005-09-19 +2005-09-14,2005-09-19 +2005-09-15,2005-09-19 +2005-09-16,2005-11-04 +2005-09-17,2005-05-13 +2005-09-18,2005-09-19 +2005-09-19,2005-09-19 +2005-09-20,2005-09-19 +2005-09-21,2005-09-19 +2005-09-22,2005-09-19 +2005-09-23,2005-11-04 +2005-09-24,2005-05-02 +2005-09-25,2005-09-19 +2005-09-26,2005-09-28 +2005-09-27,2005-09-28 +2005-09-28,2005-09-28 +2005-09-29,2005-09-19 +2005-09-30,2005-09-28 +2005-10-01,2005-09-28 +2005-10-02,2005-11-04 +2005-10-03,2005-09-19 +2005-10-04,2005-09-19 +2005-10-05,2005-09-19 +2005-10-06,2005-09-19 +2005-10-07,2005-09-19 +2005-10-08,2005-05-13 +2005-10-09,2005-11-01 +2005-10-10,2005-11-04 +2005-10-11,2005-11-04 +2005-10-12,2005-11-01 +2005-10-13,2005-11-01 +2005-10-14,2005-11-01 +2005-10-15,2005-09-19 +2005-10-16,2005-11-04 +2005-10-17,2005-11-04 +2005-10-18,2005-11-01 +2005-10-19,2005-03-09 +2005-10-20,2005-11-01 +2005-10-21,2005-09-28 +2005-10-22,2005-05-13 +2005-10-23,2005-11-01 +2005-10-24,2005-11-01 +2005-10-25,2005-11-01 +2005-10-26,2005-11-01 +2005-10-27,2005-11-01 +2005-10-28,2005-05-13 +2005-10-29,2005-09-19 +2005-10-30,2005-03-09 +2005-10-31,2005-03-09 +2005-11-01,2005-11-01 +2005-11-02,2005-12-07 +2005-11-03,2005-12-07 +2005-11-04,2005-11-04 +2005-11-05,2005-11-04 +2005-11-06,2005-12-07 +2005-11-07,2005-03-09 +2005-11-08,2005-11-01 +2005-11-09,2005-11-01 +2005-11-10,2005-11-01 +2005-11-11,2005-11-01 +2005-11-12,2005-11-01 +2005-11-13,2005-11-01 +2005-11-14,2005-11-01 +2005-11-15,2005-11-01 +2005-11-16,2005-12-07 +2005-11-17,2005-12-07 +2005-11-18,2005-11-04 +2005-11-19,2005-11-04 +2005-11-20,2005-12-07 +2005-11-21,2005-12-07 +2005-11-22,2005-11-01 +2005-11-23,2005-12-07 +2005-11-24,2005-12-07 +2005-11-25,2005-11-04 +2005-11-26,2005-11-04 +2005-11-27,2005-12-07 +2005-11-28,2005-12-07 +2005-11-29,2005-12-07 +2005-11-30,2005-12-07 +2005-12-01,2005-12-07 +2005-12-02,2005-11-04 +2005-12-03,2005-11-01 +2005-12-04,2005-12-07 +2005-12-05,2005-12-07 +2005-12-06,2005-12-07 +2005-12-07,2005-12-07 +2005-12-08,2005-12-07 +2005-12-09,2005-11-01 +2005-12-10,2005-11-01 +2005-12-11,2005-12-07 +2005-12-12,2005-12-07 +2005-12-13,2005-12-07 +2005-12-14,2005-12-07 +2005-12-15,2005-12-07 +2005-12-16,2005-11-04 +2005-12-17,2005-11-01 +2005-12-18,2005-12-07 +2005-12-19,2005-12-07 +2005-12-20,2005-12-07 +2005-12-21,2005-12-07 +2005-12-22,2005-12-07 +2005-12-23,2005-12-07 +2005-12-24,2005-11-04 +2005-12-25,2005-11-04 +2005-12-26,2005-11-04 +2005-12-27,2005-12-07 +2005-12-28,2005-12-07 +2005-12-29,2005-12-07 +2005-12-30,2005-11-04 +2005-12-31,2005-11-04 diff --git a/src/calliope/example_models/national_scale/data_sources/time_varying_params.csv b/src/calliope/example_models/national_scale/data_tables/time_varying_params.csv similarity index 100% rename from 
src/calliope/example_models/national_scale/data_sources/time_varying_params.csv rename to src/calliope/example_models/national_scale/data_tables/time_varying_params.csv diff --git a/src/calliope/example_models/national_scale/model.yaml b/src/calliope/example_models/national_scale/model.yaml index 18912aca6..654a71947 100644 --- a/src/calliope/example_models/national_scale/model.yaml +++ b/src/calliope/example_models/national_scale/model.yaml @@ -33,9 +33,9 @@ parameters: bigM: 1e6 # --8<-- [end:parameters] -data_sources: +data_tables: time_varying_parameters: - source: data_sources/time_varying_params.csv + data: data_tables/time_varying_params.csv rows: timesteps columns: [comment, nodes, techs, parameters] - drop: comment \ No newline at end of file + drop: comment diff --git a/src/calliope/example_models/national_scale/scenarios.yaml b/src/calliope/example_models/national_scale/scenarios.yaml index a0763950f..58a3dc81e 100644 --- a/src/calliope/example_models/national_scale/scenarios.yaml +++ b/src/calliope/example_models/national_scale/scenarios.yaml @@ -27,7 +27,7 @@ overrides: init: name: "National-scale example model with time clustering" time_subset: null # No time subsetting - time_cluster: data_sources/cluster_days.csv + time_cluster: data_tables/cluster_days.csv spores: config: diff --git a/src/calliope/example_models/urban_scale/data_sources/demand.csv b/src/calliope/example_models/urban_scale/data_tables/demand.csv similarity index 100% rename from src/calliope/example_models/urban_scale/data_sources/demand.csv rename to src/calliope/example_models/urban_scale/data_tables/demand.csv diff --git a/src/calliope/example_models/urban_scale/data_sources/export_power.csv b/src/calliope/example_models/urban_scale/data_tables/export_power.csv similarity index 100% rename from src/calliope/example_models/urban_scale/data_sources/export_power.csv rename to src/calliope/example_models/urban_scale/data_tables/export_power.csv diff --git a/src/calliope/example_models/urban_scale/data_sources/pv_resource.csv b/src/calliope/example_models/urban_scale/data_tables/pv_resource.csv similarity index 100% rename from src/calliope/example_models/urban_scale/data_sources/pv_resource.csv rename to src/calliope/example_models/urban_scale/data_tables/pv_resource.csv diff --git a/src/calliope/example_models/urban_scale/model.yaml b/src/calliope/example_models/urban_scale/model.yaml index fb892c1d2..e56c13028 100644 --- a/src/calliope/example_models/urban_scale/model.yaml +++ b/src/calliope/example_models/urban_scale/model.yaml @@ -13,11 +13,11 @@ config: calliope_version: 0.7.0 # Time series data path - can either be a path relative to this file, or an absolute path time_subset: ["2005-07-01", "2005-07-02"] # Subset of timesteps - add_math: ["additional_math.yaml"] build: mode: plan # Choices: plan, operate ensure_feasibility: true # Switching on unmet demand + add_math: ["additional_math.yaml"] solve: solver: cbc @@ -33,16 +33,16 @@ parameters: bigM: 1e6 # --8<-- [end:parameters] -# --8<-- [start:data-sources] -data_sources: +# --8<-- [start:data-tables] +data_tables: demand: - source: data_sources/demand.csv + data: data_tables/demand.csv rows: timesteps columns: [techs, nodes] add_dims: parameters: sink_use_equals pv_resource: - source: data_sources/pv_resource.csv + data: data_tables/pv_resource.csv rows: timesteps columns: [comment, scaler] add_dims: @@ -52,7 +52,7 @@ data_sources: scaler: per_area drop: [comment, scaler] export_power: - source: data_sources/export_power.csv + data: 
data_tables/export_power.csv rows: timesteps columns: nodes add_dims: @@ -60,4 +60,4 @@ data_sources: techs: chp costs: monetary carriers: electricity -# --8<-- [end:data-sources] +# --8<-- [end:data-tables] diff --git a/src/calliope/io.py b/src/calliope/io.py index 4b5b4f805..205ffe7f2 100644 --- a/src/calliope/io.py +++ b/src/calliope/io.py @@ -3,6 +3,7 @@ """Functions to read and save model results.""" import importlib.resources +from copy import deepcopy from pathlib import Path # We import netCDF4 before xarray to mitigate a numpy warning: @@ -124,16 +125,13 @@ def _deserialise(attrs: dict) -> None: attrs[attr] = set(attrs[attr]) -def save_netcdf(model_data, path, model=None): +def save_netcdf(model_data, path, **kwargs): """Save the model to a netCDF file.""" - original_model_data_attrs = model_data.attrs - model_data_attrs = original_model_data_attrs.copy() + original_model_data_attrs = deepcopy(model_data.attrs) + for key, value in kwargs.items(): + model_data.attrs[key] = value - if model is not None and hasattr(model, "_model_def_dict"): - # Attach initial model definition to _model_data - model_data_attrs["_model_def_dict"] = model._model_def_dict.to_yaml() - - _serialise(model_data_attrs) + _serialise(model_data.attrs) for var in model_data.data_vars.values(): _serialise(var.attrs) @@ -147,7 +145,6 @@ def save_netcdf(model_data, path, model=None): } try: - model_data.attrs = model_data_attrs model_data.to_netcdf(path, format="netCDF4", encoding=encoding) model_data.close() # Force-close NetCDF file after writing finally: # Revert model_data.attrs back diff --git a/src/calliope/math/operate.yaml b/src/calliope/math/operate.yaml index 102b345f5..ea5983287 100644 --- a/src/calliope/math/operate.yaml +++ b/src/calliope/math/operate.yaml @@ -1,3 +1,6 @@ +import: + - plan.yaml + constraints: flow_capacity_per_storage_capacity_min.active: false flow_capacity_per_storage_capacity_max.active: false diff --git a/src/calliope/math/base.yaml b/src/calliope/math/plan.yaml similarity index 99% rename from src/calliope/math/base.yaml rename to src/calliope/math/plan.yaml index 62ba0d0ea..b188a868c 100644 --- a/src/calliope/math/base.yaml +++ b/src/calliope/math/plan.yaml @@ -196,10 +196,10 @@ constraints: ( (timesteps=get_val_at_index(timesteps=0) AND cyclic_storage=True) OR NOT timesteps=get_val_at_index(timesteps=0) - ) AND NOT lookup_cluster_first_timestep=True + ) AND NOT cluster_first_timestep=True expression: (1 - storage_loss) ** roll(timestep_resolution, timesteps=1) * roll(storage, timesteps=1) - where: >- - lookup_cluster_first_timestep=True AND NOT + cluster_first_timestep=True AND NOT (timesteps=get_val_at_index(timesteps=0) AND NOT cyclic_storage=True) expression: >- (1 - storage_loss) ** diff --git a/src/calliope/math/spores.yaml b/src/calliope/math/spores.yaml index 743531270..28650c7da 100644 --- a/src/calliope/math/spores.yaml +++ b/src/calliope/math/spores.yaml @@ -1,3 +1,6 @@ +import: + - plan.yaml + constraints: cost_sum_max: equations: diff --git a/src/calliope/model.py b/src/calliope/model.py index 3840830a9..42f67875b 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -12,17 +12,14 @@ import xarray as xr import calliope -from calliope import backend, exceptions, io -from calliope._version import __version__ +from calliope import backend, exceptions, io, preprocess from calliope.attrdict import AttrDict from calliope.postprocess import postprocess as postprocess_results -from calliope.preprocess import load -from calliope.preprocess.data_sources import 
DataSource +from calliope.preprocess.data_tables import DataTable from calliope.preprocess.model_data import ModelDataFactory from calliope.util.logging import log_time from calliope.util.schema import ( CONFIG_SCHEMA, - MATH_SCHEMA, MODEL_SCHEMA, extract_from_schema, update_then_validate_config, @@ -45,14 +42,15 @@ def read_netcdf(path): class Model: """A Calliope Model.""" - _TS_OFFSET = pd.Timedelta(nanoseconds=1) + _TS_OFFSET = pd.Timedelta(1, unit="nanoseconds") + ATTRS_SAVED = ("_def_path", "applied_math") def __init__( self, model_definition: str | Path | dict | xr.Dataset, scenario: str | None = None, override_dict: dict | None = None, - data_source_dfs: dict[str, pd.DataFrame] | None = None, + data_table_dfs: dict[str, pd.DataFrame] | None = None, **kwargs, ): """Returns a new Model from YAML model configuration files or a fully specified dictionary. @@ -69,8 +67,8 @@ def __init__( Additional overrides to apply to `config`. These will be applied *after* applying any defined `scenario` overrides. Defaults to None. - data_source_dfs (dict[str, pd.DataFrame] | None, optional): - Model definition `data_source` entries can reference in-memory pandas DataFrames. + data_table_dfs (dict[str, pd.DataFrame] | None, optional): + Model definition `data_table` entries can reference in-memory pandas DataFrames. The referenced data must be supplied here as a dictionary of those DataFrames. Defaults to None. **kwargs: initialisation overrides. @@ -78,10 +76,9 @@ def __init__( self._timings: dict = {} self.config: AttrDict self.defaults: AttrDict - self.math: AttrDict - self._model_def_path: Path | None + self.applied_math: preprocess.CalliopeMath + self._def_path: str | None = None self.backend: BackendModel - self.math_documentation = backend.MathDocumentation() self._is_built: bool = False self._is_solved: bool = False @@ -93,13 +90,18 @@ def __init__( if isinstance(model_definition, xr.Dataset): self._init_from_model_data(model_definition) else: - (model_def, self._model_def_path, applied_overrides) = ( - load.load_model_definition( - model_definition, scenario, override_dict, **kwargs - ) + if isinstance(model_definition, dict): + model_def_dict = AttrDict(model_definition) + else: + self._def_path = str(model_definition) + model_def_dict = AttrDict.from_yaml(model_definition) + + (model_def, applied_overrides) = preprocess.load_scenario_overrides( + model_def_dict, scenario, override_dict, **kwargs ) + self._init_from_model_def_dict( - model_def, applied_overrides, scenario, data_source_dfs + model_def, applied_overrides, scenario, data_table_dfs ) self._model_data.attrs["timestamp_model_creation"] = timestamp_model_creation @@ -111,8 +113,6 @@ def __init__( f"but you are running {version_init}. Proceed with caution!" ) - self.math_documentation.inputs = self._model_data - @property def name(self): """Get the model name.""" @@ -143,7 +143,7 @@ def _init_from_model_def_dict( model_definition: calliope.AttrDict, applied_overrides: str, scenario: str | None, - data_source_dfs: dict[str, pd.DataFrame] | None = None, + data_table_dfs: dict[str, pd.DataFrame] | None = None, ) -> None: """Initialise the model using pre-processed YAML files and optional dataframes/dicts. @@ -151,12 +151,11 @@ def _init_from_model_def_dict( model_definition (calliope.AttrDict): preprocessed model configuration. applied_overrides (str): overrides specified by users scenario (str | None): scenario specified by users - data_source_dfs (dict[str, pd.DataFrame] | None, optional): files with additional model information. 
Defaults to None. + data_table_dfs (dict[str, pd.DataFrame] | None, optional): files with additional model information. Defaults to None. """ # First pass to check top-level keys are all good validate_dict(model_definition, CONFIG_SCHEMA, "Model definition") - self._model_def_dict = model_definition log_time( LOGGER, self._timings, @@ -167,38 +166,32 @@ def _init_from_model_def_dict( model_config.union(model_definition.pop("config"), allow_override=True) init_config = update_then_validate_config("init", model_config) - # We won't store `init` in `self.config`, so we pop it out now. - model_config.pop("init") if init_config["time_cluster"] is not None: init_config["time_cluster"] = relative_path( - self._model_def_path, init_config["time_cluster"] + self._def_path, init_config["time_cluster"] ) param_metadata = {"default": extract_from_schema(MODEL_SCHEMA, "default")} attributes = { "calliope_version_defined": init_config["calliope_version"], - "calliope_version_initialised": __version__, + "calliope_version_initialised": calliope.__version__, "applied_overrides": applied_overrides, "scenario": scenario, "defaults": param_metadata["default"], } - data_sources = [ - DataSource( - init_config, - source_name, - source_dict, - data_source_dfs, - self._model_def_path, + data_tables = [ + DataTable( + init_config, source_name, source_dict, data_table_dfs, self._def_path ) for source_name, source_dict in model_definition.pop( - "data_sources", {} + "data_tables", {} ).items() ] model_data_factory = ModelDataFactory( - init_config, model_definition, data_sources, attributes, param_metadata + init_config, model_definition, data_tables, attributes, param_metadata ) model_data_factory.build() @@ -213,9 +206,6 @@ def _init_from_model_def_dict( self._add_observed_dict("config", model_config) - math = self._add_math(init_config["add_math"]) - self._add_observed_dict("math", math) - self._model_data.attrs["name"] = init_config["name"] log_time( LOGGER, @@ -233,11 +223,12 @@ def _init_from_model_data(self, model_data: xr.Dataset) -> None: model_data (xr.Dataset): Model dataset with input parameters as arrays and configuration stored in the dataset attributes dictionary. """ - if "_model_def_dict" in model_data.attrs: - self._model_def_dict = AttrDict.from_yaml_string( - model_data.attrs["_model_def_dict"] + if "_def_path" in model_data.attrs: + self._def_path = model_data.attrs.pop("_def_path") + if "applied_math" in model_data.attrs: + self.applied_math = preprocess.CalliopeMath.from_dict( + model_data.attrs.pop("applied_math") ) - del model_data.attrs["_model_def_dict"] self._model_data = model_data self._add_model_data_methods() @@ -260,7 +251,6 @@ def _add_model_data_methods(self): """ self._add_observed_dict("config") - self._add_observed_dict("math") def _add_observed_dict(self, name: str, dict_to_add: dict | None = None) -> None: """Add the same dictionary as property of model object and an attribute of the model xarray dataset. @@ -294,52 +284,18 @@ def _add_observed_dict(self, name: str, dict_to_add: dict | None = None) -> None self._model_data.attrs[name] = dict_to_add setattr(self, name, dict_to_add) - def _add_math(self, add_math: list) -> AttrDict: - """Load the base math and optionally override with additional math from a list of references to math files. - - Args: - add_math (list): - List of references to files containing mathematical formulations that will be merged with the base formulation. 
- - Raises: - exceptions.ModelError: - Referenced pre-defined math files or user-defined math files must exist. - - Returns: - AttrDict: Dictionary of math (constraints, variables, objectives, and global expressions). - """ - math_dir = Path(calliope.__file__).parent / "math" - base_math = AttrDict.from_yaml(math_dir / "base.yaml") - - file_errors = [] - - for filename in add_math: - if not f"{filename}".endswith((".yaml", ".yml")): - yaml_filepath = math_dir / f"{filename}.yaml" - else: - yaml_filepath = relative_path(self._model_def_path, filename) - - if not yaml_filepath.is_file(): - file_errors.append(filename) - continue - else: - override_dict = AttrDict.from_yaml(yaml_filepath) - - base_math.union(override_dict, allow_override=True) - if file_errors: - raise exceptions.ModelError( - f"Attempted to load additional math that does not exist: {file_errors}" - ) - self._model_data.attrs["applied_additional_math"] = add_math - return base_math - - def build(self, force: bool = False, **kwargs) -> None: + def build( + self, force: bool = False, add_math_dict: dict | None = None, **kwargs + ) -> None: """Build description of the optimisation problem in the chosen backend interface. Args: force (bool, optional): If ``force`` is True, any existing results will be overwritten. Defaults to False. + add_math_dict (dict | None, optional): + Additional math to apply on top of the YAML base / additional math files. + Content of this dictionary will override any matching key:value pairs in the loaded math files. **kwargs: build configuration overrides. """ if self._is_built and not force: @@ -355,7 +311,8 @@ def build(self, force: bool = False, **kwargs) -> None: ) backend_config = {**self.config["build"], **kwargs} - if backend_config["mode"] == "operate": + mode = backend_config["mode"] + if mode == "operate": if not self._model_data.attrs["allow_operate_mode"]: raise exceptions.ModelError( "Unable to run this model in operate (i.e. 
dispatch) mode, probably because " @@ -367,11 +324,20 @@ def build(self, force: bool = False, **kwargs) -> None: ) else: backend_input = self._model_data + + init_math_list = [] if backend_config.get("ignore_mode_math") else [mode] + end_math_list = [] if add_math_dict is None else [add_math_dict] + full_math_list = init_math_list + backend_config["add_math"] + end_math_list + LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}") + model_math = preprocess.CalliopeMath(full_math_list, self._def_path) + backend_name = backend_config.pop("backend") self.backend = backend.get_model_backend( - backend_name, backend_input, **backend_config + backend_name, backend_input, model_math, **backend_config ) - self.backend.add_all_math() + self.backend.add_optimisation_components() + + self.applied_math = model_math self._model_data.attrs["timestamp_build_complete"] = log_time( LOGGER, @@ -497,7 +463,14 @@ def run(self, force_rerun=False, **kwargs): def to_netcdf(self, path): """Save complete model data (inputs and, if available, results) to a NetCDF file at the given `path`.""" - io.save_netcdf(self._model_data, path, model=self) + saved_attrs = {} + for attr in set(self.ATTRS_SAVED) & set(self.__dict__.keys()): + if not isinstance(getattr(self, attr), str | list | None): + saved_attrs[attr] = dict(getattr(self, attr)) + else: + saved_attrs[attr] = getattr(self, attr) + + io.save_netcdf(self._model_data, path, **saved_attrs) def to_csv( self, path: str | Path, dropna: bool = True, allow_overwrite: bool = False @@ -533,60 +506,6 @@ def info(self) -> str: ) return "\n".join(info_strings) - def validate_math_strings(self, math_dict: dict) -> None: - """Validate that `expression` and `where` strings of a dictionary containing string mathematical formulations can be successfully parsed. - - This function can be used to test user-defined math before attempting to build the optimisation problem. - - NOTE: strings are not checked for evaluation validity. Evaluation issues will be raised only on calling `Model.build()`. - - Args: - math_dict (dict): Math formulation dictionary to validate. Top level keys must be one or more of ["variables", "global_expressions", "constraints", "objectives"], e.g.: - ```python - { - "constraints": { - "my_constraint_name": - { - "foreach": ["nodes"], - "where": "base_tech=supply", - "equations": [{"expression": "sum(flow_cap, over=techs) >= 10"}] - } - - } - } - ``` - Returns: - If all components of the dictionary are parsed successfully, this function will log a success message to the INFO logging level and return None. - Otherwise, a calliope.ModelError will be raised with parsing issues listed. 
- """ - validate_dict(math_dict, MATH_SCHEMA, "math") - valid_component_names = [ - *self.math["variables"].keys(), - *self.math["global_expressions"].keys(), - *math_dict.get("variables", {}).keys(), - *math_dict.get("global_expressions", {}).keys(), - *self.inputs.data_vars.keys(), - *self.inputs.attrs["defaults"].keys(), - ] - collected_errors: dict = dict() - for component_group, component_dicts in math_dict.items(): - for name, component_dict in component_dicts.items(): - parsed = backend.ParsedBackendComponent( - component_group, name, component_dict - ) - parsed.parse_top_level_where(errors="ignore") - parsed.parse_equations(set(valid_component_names), errors="ignore") - if not parsed._is_valid: - collected_errors[f"{component_group}:{name}"] = parsed._errors - - if collected_errors: - exceptions.print_warnings_and_raise_errors( - during="math string parsing (marker indicates where parsing stopped, which might not be the root cause of the issue; sorry...)", - errors=collected_errors, - ) - - LOGGER.info("Model: validated math strings") - def _prepare_operate_mode_inputs( self, start_window_idx: int = 0, **config_kwargs ) -> xr.Dataset: diff --git a/src/calliope/postprocess/math_documentation.py b/src/calliope/postprocess/math_documentation.py new file mode 100644 index 000000000..ebfb3193f --- /dev/null +++ b/src/calliope/postprocess/math_documentation.py @@ -0,0 +1,101 @@ +"""Post-processing functions to create math documentation.""" + +import logging +import typing +from pathlib import Path +from typing import Literal, overload + +from calliope.backend import ALLOWED_MATH_FILE_FORMATS, LatexBackendModel +from calliope.model import Model + +LOGGER = logging.getLogger(__name__) + + +class MathDocumentation: + """For math documentation.""" + + def __init__( + self, model: Model, include: Literal["all", "valid"] = "all", **kwargs + ) -> None: + """Math documentation builder/writer. + + Backend is always built by default. + + Args: + model (Model): initialised Callipe model instance. + include (Literal["all", "valid"], optional): + Either include all possible math equations ("all") or only those for + which at least one "where" case is valid ("valid"). Defaults to "all". + **kwargs: kwargs for the LaTeX backend. + """ + self.name: str = model.name + " math" + self.backend: LatexBackendModel = LatexBackendModel( + model._model_data, model.applied_math, include, **kwargs + ) + self.backend.add_optimisation_components() + + @property + def math(self): + """Direct access to backend math.""" + return self.backend.math + + # Expecting string if not giving filename. + @overload + def write( + self, + filename: Literal[None] = None, + mkdocs_features: bool = False, + format: ALLOWED_MATH_FILE_FORMATS | None = None, + ) -> str: ... + + # Expecting None (and format arg is not needed) if giving filename. + @overload + def write(self, filename: str | Path, mkdocs_features: bool = False) -> None: ... + + def write( + self, + filename: str | Path | None = None, + mkdocs_features: bool = False, + format: ALLOWED_MATH_FILE_FORMATS | None = None, + ) -> str | None: + """Write model documentation. + + Args: + filename (str | Path | None, optional): + If given, will write the built mathematical formulation to a file with + the given extension as the file format. Defaults to None. 
+ mkdocs_features (bool, optional): + If True and Markdown docs are being generated, then: + - the equations will be on a tab and the original YAML math definition will be on another tab; + - the equation cross-references will be given in a drop-down list. + Defaults to False. + format (ALLOWED_MATH_FILE_FORMATS | None, optional): + Not required if filename is given (as the format will be automatically inferred). + Required if expecting a string return from calling this function. The LaTeX math will be embedded in a document of the given format (tex=LaTeX, rst=reStructuredText, md=Markdown). + Defaults to None. + + Raises: + ValueError: The file format (inferred automatically from `filename` or given by `format`) must be one of ["tex", "rst", "md"]. + + Returns: + str | None: + If `filename` is None, the built mathematical formulation documentation will be returned as a string. + """ + if format is None and filename is not None: + format = Path(filename).suffix.removeprefix(".") # type: ignore + LOGGER.info( + f"Inferring math documentation format from filename as `{format}`." + ) + + allowed_formats = typing.get_args(ALLOWED_MATH_FILE_FORMATS) + if format is None or format not in allowed_formats: + raise ValueError( + f"Math documentation format must be one of {allowed_formats}, received `{format}`" + ) + populated_doc = self.backend.generate_math_doc(format, mkdocs_features) + + if filename is None: + return populated_doc + else: + Path(filename).write_text(populated_doc) + return None diff --git a/src/calliope/preprocess/__init__.py b/src/calliope/preprocess/__init__.py index 0ba2ad525..2b9584be2 100644 --- a/src/calliope/preprocess/__init__.py +++ b/src/calliope/preprocess/__init__.py @@ -1 +1,6 @@ """Preprocessing module.""" + +from calliope.preprocess.data_tables import DataTable +from calliope.preprocess.model_data import ModelDataFactory +from calliope.preprocess.model_math import CalliopeMath +from calliope.preprocess.scenarios import load_scenario_overrides diff --git a/src/calliope/preprocess/data_sources.py b/src/calliope/preprocess/data_tables.py similarity index 88% rename from src/calliope/preprocess/data_sources.py rename to src/calliope/preprocess/data_tables.py index f11bb6687..b8151c1da 100644 --- a/src/calliope/preprocess/data_sources.py +++ b/src/calliope/preprocess/data_tables.py @@ -15,7 +15,7 @@ from calliope.attrdict import AttrDict from calliope.io import load_config from calliope.util.schema import ( - DATA_SOURCE_SCHEMA, + DATA_TABLE_SCHEMA, MODEL_SCHEMA, extract_from_schema, validate_dict, @@ -27,71 +27,71 @@ DTYPE_OPTIONS = {"str": str, "float": float} -class DataSourceDict(TypedDict): - """Uniform dictionary for data sources.""" +class DataTableDict(TypedDict): + """Uniform dictionary for data tables.""" rows: NotRequired[str | list[str]] columns: NotRequired[str | list[str]] - source: str + data: str df: NotRequired[str] add_dims: NotRequired[dict[str, str | list[str]]] select: dict[str, str | bool | int] drop: Hashable | list[Hashable] -class DataSource: +class DataTable: """Class for in memory data handling.""" - MESSAGE_TEMPLATE = "(data_sources, {name}) | {message}." + MESSAGE_TEMPLATE = "(data_tables, {name}) | {message}." 
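+    # Presumably rendered via str.format, e.g. "(data_tables, demand) | Missing row names." (table name and message hypothetical).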
PARAMS_TO_INITIALISE_YAML = ["base_tech", "to", "from"] def __init__( self, model_config: dict, - source_name: str, - data_source: DataSourceDict, - data_source_dfs: dict[str, pd.DataFrame] | None = None, + table_name: str, + data_table: DataTableDict, + data_table_dfs: dict[str, pd.DataFrame] | None = None, model_definition_path: Path | None = None, ): - """Load and format a data source from file / in-memory object. + """Load and format a data table from file / in-memory object. Args: model_config (dict): Model initialisation configuration dictionary. - source_name (str): name of the data source. - data_source (DataSourceDict): Data source definition dictionary. - data_source_dfs (dict[str, pd.DataFrame] | None, optional): - If given, a dictionary mapping source names in `data_source` to in-memory pandas DataFrames. + table_name (str): name of the data table. + data_table (DataTableDict): Data table definition dictionary. + data_table_dfs (dict[str, pd.DataFrame] | None, optional): + If given, a dictionary mapping table names in `data_table` to in-memory pandas DataFrames. Defaults to None. model_definition_path (Path | None, optional): - If given, the path to the model definition YAML file, relative to which data source filepaths will be set. - If None, relative data source filepaths will be considered relative to the current working directory. + If given, the path to the model definition YAML file, relative to which data table filepaths will be set. + If None, relative data table filepaths will be considered relative to the current working directory. Defaults to None. """ - validate_dict(data_source, DATA_SOURCE_SCHEMA, "data source") - self.input = data_source - self.dfs = data_source_dfs if data_source_dfs is not None else dict() + validate_dict(data_table, DATA_TABLE_SCHEMA, "data table") + self.input = data_table + self.dfs = data_table_dfs if data_table_dfs is not None else dict() self.model_definition_path = model_definition_path self.config = model_config self.columns = self._listify_if_defined("columns") self.index = self._listify_if_defined("rows") - self._name = source_name + self._name = table_name self.protected_params = load_config("protected_parameters.yaml") - if ".csv" in Path(self.input["source"]).suffixes: + if ".csv" in Path(self.input["data"]).suffixes: df = self._read_csv() else: - df = self.dfs[self.input["source"]] + df = self.dfs[self.input["data"]] self.dataset = self._df_to_ds(df) @property def name(self): - """Data source name.""" + """Data table name.""" return self._name def drop(self, name: str): - """Drop a data in-place from the data source. + """Drop a data in-place from the data table. Args: name (str): Name of data array to drop. @@ -118,15 +118,15 @@ def tech_dict(self) -> tuple[AttrDict, AttrDict]: return tech_dict, base_tech_data def node_dict(self, techs_incl_inheritance: AttrDict) -> AttrDict: - """Create a dummy node definition dictionary from the dimensions defined across all data sources. + """Create a dummy node definition dictionary from the dimensions defined across all data tables. This definition dictionary will ensure that the minimal YAML content is still possible. - This function should be run _after_ `self._update_tech_def_from_data_source`. + This function should be run _after_ `self._update_tech_def_from_data_table`. Args: techs_incl_inheritance (AttrDict): - Technology definition dictionary which is a union of any YAML definition and the result of calling `self.tech_dict` across all data sources. 
+                Technology definition dictionary which is a union of any YAML definition and the result of calling `self.tech_dict` across all data tables.                 Technologies should have their entire definition inheritance chain resolved.         """         node_tech_vars = self.dataset[ @@ -245,7 +245,7 @@ def _read_csv(self) -> pd.DataFrame:         Returns:             pd.DataFrame: Loaded data without any processing.         """ -        filename = self.input["source"] +        filename = self.input["data"]          if self.columns is None:             self._log( @@ -272,7 +272,7 @@ def _df_to_ds(self, df: pd.DataFrame) -> xr.Dataset:         """         if not isinstance(df, pd.DataFrame):             self._raise_error( -                "Data source must be a pandas DataFrame. " +                "Data table must be a pandas DataFrame. "                 "If you are providing an in-memory object, ensure it is not a pandas Series by calling the method `to_frame()`"             )         for axis, names in {"columns": self.columns, "index": self.index}.items(): @@ -342,7 +342,7 @@ def _check_for_protected_params(self, tdf: pd.Series):         if not invalid_params.empty:             extra_info = set(self.protected_params[k] for k in invalid_params)             exceptions.print_warnings_and_raise_errors( -                errors=list(extra_info), during=f"data source loading ({self.name})" +                errors=list(extra_info), during=f"data table loading ({self.name})"             )      def _check_processed_tdf(self, tdf: pd.Series): @@ -374,7 +374,7 @@ def _log(self, message, level="debug"):         )      def _listify_if_defined(self, key: str) -> list | None: -        """If `key` is in data source definition dictionary, return values as a list. +        """If `key` is in data table definition dictionary, return values as a list.          If values are not yet an iterable, they will be coerced to an iterable of length 1.         If they are an iterable, they will be coerced to a list. @@ -384,7 +384,7 @@         default (Literal[None, 0]): Either zero or None          Returns: -            list | None: If `key` not defined in data source, return None, else return values as a list. +            list | None: If `key` not defined in data table, return None, else return values as a list.         """         vals = self.input.get(key, None)         if vals is not None:             return vals      def _compare_axis_names(self, loaded_names: list, defined_names: list, axis: str): -        """Check loaded axis level names compared to those given by `rows` and `columns` in data source definition dictionary. +        """Check loaded axis level names compared to those given by `rows` and `columns` in data table definition dictionary.          The data file / in-memory object does not need to have any level names defined, -        but if they _are_ defined then they must match those given in the data source definition dictionary. +        but if they _are_ defined then they must match those given in the data table definition dictionary.          Args:             loaded_names (list): Names as defined in the loaded data file / in-memory object. -            defined_names (list): Names as defined in the data source dictionary. +            defined_names (list): Names as defined in the data table dictionary.             axis (str): Axis on which the names are levels.
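+ +        Example: +            A hypothetical mismatch: a CSV whose index level is named "time" alongside `rows: timesteps` in the table definition would fail this check on the "rows" axis.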
""" if any( diff --git a/src/calliope/preprocess/model_data.py b/src/calliope/preprocess/model_data.py index 442c6226e..6de0aa3f8 100644 --- a/src/calliope/preprocess/model_data.py +++ b/src/calliope/preprocess/model_data.py @@ -15,7 +15,7 @@ from calliope import exceptions from calliope.attrdict import AttrDict -from calliope.preprocess import data_sources, time +from calliope.preprocess import data_tables, time from calliope.util.schema import MODEL_SCHEMA, validate_dict from calliope.util.tools import listify @@ -72,7 +72,7 @@ def __init__( self, model_config: dict, model_definition: ModelDefinition, - data_sources: list[data_sources.DataSource], + data_tables: list[data_tables.DataTable], attributes: dict, param_attributes: dict[str, dict], ): @@ -83,15 +83,15 @@ def __init__( Args: model_config (dict): Model initialisation configuration (i.e., `config.init`). model_definition (ModelDefinition): Definition of model nodes and technologies, and their potential `templates`. - data_sources (list[data_sources.DataSource]): Pre-loaded data sources that will be used to initialise the dataset before handling definitions given in `model_definition`. + data_tables (list[data_tables.DataTable]): Pre-loaded data tables that will be used to initialise the dataset before handling definitions given in `model_definition`. attributes (dict): Attributes to attach to the model Dataset. param_attributes (dict[str, dict]): Attributes to attach to the generated model DataArrays. """ self.config: dict = model_config self.model_definition: ModelDefinition = model_definition.copy() self.dataset = xr.Dataset(attrs=AttrDict(attributes)) - self.tech_data_from_sources = AttrDict() - self.init_from_data_sources(data_sources) + self.tech_data_from_tables = AttrDict() + self.init_from_data_tables(data_tables) flipped_attributes: dict[str, dict] = dict() for key, val in param_attributes.items(): @@ -110,39 +110,39 @@ def build(self): self.update_time_dimension_and_params() self.assign_input_attr() - def init_from_data_sources(self, data_sources: list[data_sources.DataSource]): + def init_from_data_tables(self, data_tables: list[data_tables.DataTable]): """Initialise the model definition and dataset using data loaded from file / in-memory objects. - A basic skeleton of the dictionary format model definition is created from the data sources, + A basic skeleton of the dictionary format model definition is created from the data tables, namely technology and technology-at-node lists (without parameter definitions). Args: - data_sources (list[data_sources.DataSource]): Pre-loaded data sources. + data_tables (list[data_tables.DataTable]): Pre-loaded data tables. 
""" - for data_source in data_sources: - tech_dict, base_tech_data = data_source.tech_dict() + for data_table in data_tables: + tech_dict, base_tech_data = data_table.tech_dict() tech_dict.union( self.model_definition.get("techs", AttrDict()), allow_override=True ) self.model_definition["techs"] = tech_dict - self.tech_data_from_sources.union(base_tech_data) + self.tech_data_from_tables.union(base_tech_data) techs_incl_inheritance = self._inherit_defs("techs") - for data_source in data_sources: - node_dict = data_source.node_dict(techs_incl_inheritance) + for data_table in data_tables: + node_dict = data_table.node_dict(techs_incl_inheritance) node_dict.union( self.model_definition.get("nodes", AttrDict()), allow_override=True ) self.model_definition["nodes"] = node_dict for param, lookup_dim in self.LOOKUP_PARAMS.items(): - lookup_dict = data_source.lookup_dict_from_param(param, lookup_dim) - self.tech_data_from_sources.union(lookup_dict) + lookup_dict = data_table.lookup_dict_from_param(param, lookup_dim) + self.tech_data_from_tables.union(lookup_dict) if lookup_dict: - data_source.drop(param) + data_table.drop(param) - for data_source in data_sources: + for data_table in data_tables: self._add_to_dataset( - data_source.dataset, f"(data_sources, {data_source.name})" + data_table.dataset, f"(data_tables, {data_table.name})" ) def add_node_tech_data(self): @@ -219,7 +219,7 @@ def add_top_level_params(self): if name in self.dataset.data_vars: exceptions.warn( f"(parameters, {name}) | " - "A parameter with this name has already been defined in a data source or at a node/tech level. " + "A parameter with this name has already been defined in a data table or at a node/tech level. " f"Non-NaN data defined here will override existing data for this parameter." 
) param_dict = self._prepare_param_dict(name, data) @@ -609,10 +609,10 @@ def _climb_template_tree( to_inherit = dim_item_dict.get("template", None) dim_groups = AttrDict(self.model_definition.get("templates", {})) if to_inherit is None: - if dim_name == "techs" and item_name in self.tech_data_from_sources: - _data_source_dict = deepcopy(self.tech_data_from_sources[item_name]) - _data_source_dict.union(dim_item_dict, allow_override=True) - dim_item_dict = _data_source_dict + if dim_name == "techs" and item_name in self.tech_data_from_tables: + _data_table_dict = deepcopy(self.tech_data_from_tables[item_name]) + _data_table_dict.union(dim_item_dict, allow_override=True) + dim_item_dict = _data_table_dict updated_dim_item_dict = dim_item_dict elif to_inherit not in dim_groups: raise KeyError( diff --git a/src/calliope/preprocess/model_math.py b/src/calliope/preprocess/model_math.py new file mode 100644 index 000000000..a05a6a126 --- /dev/null +++ b/src/calliope/preprocess/model_math.py @@ -0,0 +1,176 @@ +"""Calliope math handling with interfaces for pre-defined and user-defined files.""" + +import importlib.resources +import logging +import typing +from copy import deepcopy +from pathlib import Path + +from calliope.attrdict import AttrDict +from calliope.exceptions import ModelError +from calliope.util.schema import MATH_SCHEMA, validate_dict +from calliope.util.tools import relative_path + +LOGGER = logging.getLogger(__name__) +ORDERED_COMPONENTS_T = typing.Literal[ + "variables", + "global_expressions", + "constraints", + "piecewise_constraints", + "objectives", +] + + +class CalliopeMath: + """Calliope math handling.""" + + ATTRS_TO_SAVE = ("history", "data") + ATTRS_TO_LOAD = ("history",) + + def __init__( + self, math_to_add: list[str | dict], model_def_path: str | Path | None = None + ): + """Calliope YAML math handler. + + Args: + math_to_add (list[str | dict]): + List of Calliope math to load. + If a string, it can be a reference to pre-/user-defined math files. + If a dictionary, it is equivalent in structure to a YAML math file. + model_def_path (str | Path | None, optional): Model definition path, needed when using relative paths. Defaults to None. + """ + self.history: list[str] = [] + self.data: AttrDict = AttrDict( + {name: {} for name in typing.get_args(ORDERED_COMPONENTS_T)} + ) + + for math in math_to_add: + if isinstance(math, dict): + self.add(AttrDict(math)) + else: + self._init_from_string(math, model_def_path) + + def __eq__(self, other): + """Compare between two model math instantiations.""" + if not isinstance(other, CalliopeMath): + return NotImplemented + return self.history == other.history and self.data == other.data + + def __iter__(self): + """Enable dictionary conversion.""" + for key in self.ATTRS_TO_SAVE: + yield key, deepcopy(getattr(self, key)) + + def __repr__(self) -> str: + """Custom string representation of class.""" + return f"""Calliope math definition dictionary with: + {len(self.data["variables"])} decision variable(s) + {len(self.data["global_expressions"])} global expression(s) + {len(self.data["constraints"])} constraint(s) + {len(self.data["piecewise_constraints"])} piecewise constraint(s) + {len(self.data["objectives"])} objective(s) + """ + + def add(self, math: AttrDict): + """Add math into the model. + + Args: + math (AttrDict): Valid math dictionary. 
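+ +        Example: +            A minimal sketch, deactivating a hypothetical constraint on top of already-loaded math: + +            >>> math = CalliopeMath(["plan"]) +            >>> math.add(AttrDict({"constraints": {"my_constraint": {"active": False}}}))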
+ """ + self.data.union(math, allow_override=True) + + @classmethod + def from_dict(cls, math_dict: dict) -> "CalliopeMath": + """Load a CalliopeMath object from a dictionary representation, recuperating relevant attributes. + + Args: + math_dict (dict): Dictionary representation of a CalliopeMath object. + + Returns: + CalliopeMath: Loaded from supplied dictionary representation. + """ + new_self = cls([math_dict["data"]]) + for attr in cls.ATTRS_TO_LOAD: + setattr(new_self, attr, math_dict[attr]) + return new_self + + def in_history(self, math_name: str) -> bool: + """Evaluate if math has already been applied. + + Args: + math_name (str): Math file to check. + + Returns: + bool: `True` if found in history. `False` otherwise. + """ + return math_name in self.history + + def validate(self) -> None: + """Test current math and optional external math against the MATH schema.""" + validate_dict(self.data, MATH_SCHEMA, "math") + LOGGER.info("Math preprocessing | validated math against schema.") + + def _add_pre_defined_file(self, filename: str) -> None: + """Add pre-defined Calliope math. + + Args: + filename (str): name of Calliope internal math (no suffix). + + Raises: + ModelError: If math has already been applied. + """ + if self.in_history(filename): + raise ModelError( + f"Math preprocessing | Overwriting with previously applied pre-defined math: '{filename}'." + ) + with importlib.resources.as_file( + importlib.resources.files("calliope") / "math" + ) as f: + self._add_file(f / f"{filename}.yaml", filename) + + def _add_user_defined_file( + self, relative_filepath: str | Path, model_def_path: str | Path | None + ) -> None: + """Add user-defined Calliope math, relative to the model definition path. + + Args: + relative_filepath (str | Path): Path to user math, relative to model definition. + model_def_path (str | Path): Model definition path. + + Raises: + ModelError: If file has already been applied. + """ + math_name = str(relative_filepath) + if self.in_history(math_name): + raise ModelError( + f"Math preprocessing | Overwriting with previously applied user-defined math: '{relative_filepath}'." + ) + self._add_file(relative_path(model_def_path, relative_filepath), math_name) + + def _init_from_string( + self, math_to_add: str, model_def_path: str | Path | None = None + ): + """Load math definition from a list of files. + + Args: + math_to_add (str): Calliope math file to load. Suffix implies user-math. + model_def_path (str | Path | None, optional): Model definition path. Defaults to None. + + Raises: + ModelError: User-math requested without providing `model_def_path`. 
+ """ + if not math_to_add.endswith((".yaml", ".yml")): + self._add_pre_defined_file(math_to_add) + else: + self._add_user_defined_file(math_to_add, model_def_path) + + def _add_file(self, yaml_filepath: Path, name: str) -> None: + try: + math = AttrDict.from_yaml(yaml_filepath, allow_override=True) + except FileNotFoundError: + raise ModelError( + f"Math preprocessing | File does not exist: {yaml_filepath}" + ) + self.add(math) + self.history.append(name) + LOGGER.info(f"Math preprocessing | added file '{name}'.") diff --git a/src/calliope/preprocess/load.py b/src/calliope/preprocess/scenarios.py similarity index 55% rename from src/calliope/preprocess/load.py rename to src/calliope/preprocess/scenarios.py index 6c1df0452..473544fbe 100644 --- a/src/calliope/preprocess/load.py +++ b/src/calliope/preprocess/scenarios.py @@ -3,7 +3,6 @@ """Preprocessing of base model definition and overrides/scenarios into a unified dictionary.""" import logging -from pathlib import Path from calliope import exceptions from calliope.attrdict import AttrDict @@ -12,96 +11,35 @@ LOGGER = logging.getLogger(__name__) -def load_model_definition( - model_definition: str | Path | dict, +def load_scenario_overrides( + model_definition: dict, scenario: str | None = None, override_dict: dict | None = None, **kwargs, -) -> tuple[AttrDict, Path | None, str]: - """Load model definition from file / dictionary and apply user-defined overrides. +) -> tuple[AttrDict, str]: + """Apply user-defined overrides to the model definition. Args: - model_definition (str | Path | dict): - If string or pathlib.Path, path to YAML file with model configuration. - If dictionary, equivalent to loading the model configuration YAML from file. - scenario (str | None, optional): - If not None, name of scenario to apply. - Can either be a named scenario, or a comma-separated list of individual overrides to be combined ad-hoc, - e.g. 'my_scenario_name' or 'override1,override2'. + model_definition (dict): + Model definition dictionary. + scenario (str | None, optional): Scenario(s) to apply, comma separated. + e.g.: 'my_scenario_name' or 'override1,override2'. Defaults to None. override_dict (dict | None, optional): - If not None, dictionary of overrides to apply. - These will be applied _after_ `scenario` overrides. + Overrides to apply _after_ `scenario` overrides. Defaults to None. - **kwargs: initialisation overrides. + **kwargs: + initialisation overrides. Returns: - tuple[AttrDict, Path | None, str]: + tuple[AttrDict, str]: 1. Model definition with overrides applied. - 2. Path to model definition YAML if input `model_definiton` was pathlike, otherwise None. - 3. Expansion of scenarios (which are references to model overrides) into a list of named override(s) that have been applied. + 2. Expansion of scenarios (which are references to model overrides) into a list of named override(s) that have been applied. 
""" - if not isinstance(model_definition, dict): - model_def_path = Path(model_definition) - model_def_dict = AttrDict.from_yaml(model_def_path) - else: - model_def_dict = AttrDict(model_definition) - model_def_path = None - - model_def_with_overrides, applied_overrides = _apply_overrides( - model_def_dict, scenario=scenario, override_dict=override_dict - ) - model_def_with_overrides.union( - AttrDict({"config.init": kwargs}), allow_override=True - ) - - return (model_def_with_overrides, model_def_path, ";".join(applied_overrides)) - - -def _combine_overrides(overrides: AttrDict, scenario_overrides: list): - combined_override_dict = AttrDict() - for override in scenario_overrides: - try: - yaml_string = overrides[override].to_yaml() - override_with_imports = AttrDict.from_yaml_string(yaml_string) - except KeyError: - raise exceptions.ModelError(f"Override `{override}` is not defined.") - try: - combined_override_dict.union(override_with_imports, allow_override=False) - except KeyError as e: - raise exceptions.ModelError( - f"{str(e)[1:-1]}. Already specified but defined again in override `{override}`." - ) - - return combined_override_dict - - -def _apply_overrides( - model_def: AttrDict, - scenario: str | None = None, - override_dict: str | dict | None = None, -) -> tuple[AttrDict, list[str]]: - """Generate processed Model configuration, applying any scenario overrides. - - Args: - model_def (calliope.Attrdict): Loaded model definition as an attribute dictionary. - scenario (str | None, optional): - If not None, name of scenario to apply. - Can either be a named scenario, or a comma-separated list of individual overrides to be combined ad-hoc, - e.g. 'my_scenario_name' or 'override1,override2'. - Defaults to None. - override_dict (str | dict | None, optional): - If not None, dictionary of overrides to apply. - These will be applied _after_ `scenario` overrides. - Defaults to None. + model_def_dict = AttrDict(model_definition) - Returns: - tuple[AttrDict, list[str]]: - 1. Model definition dictionary with overrides applied from `scenario` and `override_dict`. - 1. Expansion of scenarios (which are references to model overrides) into a list of named override(s) that have been applied. - """ # The input files are allowed to override other model defaults - model_def_copy = model_def.copy() + model_def_with_overrides = model_def_dict.copy() # First pass of applying override dict before applying scenarios, # so that can override scenario definitions by override_dict @@ -110,43 +48,69 @@ def _apply_overrides( if isinstance(override_dict, dict): override_dict = AttrDict(override_dict) - model_def_copy.union(override_dict, allow_override=True, allow_replacement=True) + model_def_with_overrides.union( + override_dict, allow_override=True, allow_replacement=True + ) - overrides = model_def_copy.pop("overrides", {}) - scenarios = model_def_copy.pop("scenarios", {}) + overrides = model_def_with_overrides.pop("overrides", {}) + scenarios = model_def_with_overrides.pop("scenarios", {}) if scenario is not None: - scenario_overrides = _load_overrides_from_scenario( - model_def_copy, scenario, overrides, scenarios + applied_overrides = _load_overrides_from_scenario( + model_def_with_overrides, scenario, overrides, scenarios ) LOGGER.info( - f"(scenarios, {scenario} ) | Applying the following overrides: {scenario_overrides}." + f"(scenarios, {scenario} ) | Applying the following overrides: {applied_overrides}." 
) - overrides_from_scenario = _combine_overrides(overrides, scenario_overrides) + overrides_from_scenario = _combine_overrides(overrides, applied_overrides) - model_def_copy.union( + model_def_with_overrides.union( overrides_from_scenario, allow_override=True, allow_replacement=True ) else: - scenario_overrides = [] + applied_overrides = [] # Second pass of applying override dict after applying scenarios, # so that scenario-based overrides are overridden by override_dict! if override_dict is not None: - model_def_copy.union(override_dict, allow_override=True, allow_replacement=True) - if "locations" in model_def_copy.keys(): + model_def_with_overrides.union( + override_dict, allow_override=True, allow_replacement=True + ) + if "locations" in model_def_with_overrides.keys(): # TODO: remove in v0.7.1 exceptions.warn( "`locations` has been renamed to `nodes` and will stop working " "in v0.7.1. Please update your model configuration accordingly.", FutureWarning, ) - model_def_copy["nodes"] = model_def_copy["locations"] - del model_def_copy["locations"] + model_def_with_overrides["nodes"] = model_def_with_overrides["locations"] + del model_def_with_overrides["locations"] - _log_overrides(model_def, model_def_copy) + _log_overrides(model_def_dict, model_def_with_overrides) - return model_def_copy, scenario_overrides + model_def_with_overrides.union( + AttrDict({"config.init": kwargs}), allow_override=True + ) + + return (model_def_with_overrides, ";".join(applied_overrides)) + + +def _combine_overrides(overrides: AttrDict, scenario_overrides: list): + combined_override_dict = AttrDict() + for override in scenario_overrides: + try: + yaml_string = overrides[override].to_yaml() + override_with_imports = AttrDict.from_yaml_string(yaml_string) + except KeyError: + raise exceptions.ModelError(f"Override `{override}` is not defined.") + try: + combined_override_dict.union(override_with_imports, allow_override=False) + except KeyError as e: + raise exceptions.ModelError( + f"{str(e)[1:-1]}. Already specified but defined again in override `{override}`." + ) + + return combined_override_dict def _load_overrides_from_scenario( diff --git a/src/calliope/preprocess/time.py b/src/calliope/preprocess/time.py index ebb16ee7f..6b1cc97b6 100644 --- a/src/calliope/preprocess/time.py +++ b/src/calliope/preprocess/time.py @@ -262,11 +262,14 @@ def _lookup_clusters(dataset: xr.Dataset, grouper: pd.Series) -> xr.Dataset: 1. the first and last timestep of the cluster, 2. 
the last timestep of the cluster corresponding to a date in the original timeseries """ - dataset["lookup_cluster_first_timestep"] = dataset.timesteps.isin( + dataset["cluster_first_timestep"] = dataset.timesteps.isin( dataset.timesteps.groupby("timesteps.date").first() ) - dataset["lookup_cluster_last_timestep"] = dataset.timesteps.isin( - dataset.timesteps.groupby("timesteps.date").last() + dataset["lookup_cluster_last_timestep"] = ( + dataset.timesteps.groupby("timesteps.date") + .last() + .rename({"date": "timesteps"}) + .reindex_like(dataset.timesteps) ) dataset["lookup_datestep_cluster"] = xr.DataArray( diff --git a/src/calliope/util/schema.py b/src/calliope/util/schema.py index a98f9bde5..bd98cc772 100644 --- a/src/calliope/util/schema.py +++ b/src/calliope/util/schema.py @@ -16,7 +16,7 @@ CONFIG_SCHEMA = load_config("config_schema.yaml") MODEL_SCHEMA = load_config("model_def_schema.yaml") -DATA_SOURCE_SCHEMA = load_config("data_source_schema.yaml") +DATA_TABLE_SCHEMA = load_config("data_table_schema.yaml") MATH_SCHEMA = load_config("math_schema.yaml") diff --git a/src/calliope/util/tools.py b/src/calliope/util/tools.py index 821949279..51920d886 100644 --- a/src/calliope/util/tools.py +++ b/src/calliope/util/tools.py @@ -19,13 +19,13 @@ def relative_path(base_path_file, path) -> Path: """ # Check if base_path_file is a string because it might be an AttrDict path = Path(path) - if base_path_file is not None: + if path.is_absolute() or base_path_file is None: + return path + else: base_path_file = Path(base_path_file) if base_path_file.is_file(): base_path_file = base_path_file.parent - if not path.is_absolute(): - path = base_path_file.absolute() / path - return path + return base_path_file.absolute() / path def listify(var: Any) -> list: @@ -40,7 +40,9 @@ def listify(var: Any) -> list: Returns: list: List containing `var` or elements of `var` (if input was a non-string iterable). 
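+ +    Example: +        A doctest-style sketch of the updated behaviour: + +        >>> listify(None) +        [] +        >>> listify("foo") +        ['foo'] +        >>> listify((1, 2)) +        [1, 2]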
""" - if not isinstance(var, str) and hasattr(var, "__iter__"): + if var is None: + var = [] + elif not isinstance(var, str) and hasattr(var, "__iter__"): var = list(var) else: var = [var] diff --git a/tests/common/national_scale_from_data_sources/data_sources/costs_params.csv b/tests/common/national_scale_from_data_tables/data_tables/costs_params.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/costs_params.csv rename to tests/common/national_scale_from_data_tables/data_tables/costs_params.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/dimensionless_params.csv b/tests/common/national_scale_from_data_tables/data_tables/dimensionless_params.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/dimensionless_params.csv rename to tests/common/national_scale_from_data_tables/data_tables/dimensionless_params.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/links.csv b/tests/common/national_scale_from_data_tables/data_tables/links.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/links.csv rename to tests/common/national_scale_from_data_tables/data_tables/links.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/nodes_base_info.csv b/tests/common/national_scale_from_data_tables/data_tables/nodes_base_info.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/nodes_base_info.csv rename to tests/common/national_scale_from_data_tables/data_tables/nodes_base_info.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/techs_base_info.csv b/tests/common/national_scale_from_data_tables/data_tables/techs_base_info.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/techs_base_info.csv rename to tests/common/national_scale_from_data_tables/data_tables/techs_base_info.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/techs_carriers.csv b/tests/common/national_scale_from_data_tables/data_tables/techs_carriers.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/techs_carriers.csv rename to tests/common/national_scale_from_data_tables/data_tables/techs_carriers.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/techs_constraints.csv b/tests/common/national_scale_from_data_tables/data_tables/techs_constraints.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/techs_constraints.csv rename to tests/common/national_scale_from_data_tables/data_tables/techs_constraints.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/techs_costs_monetary.csv b/tests/common/national_scale_from_data_tables/data_tables/techs_costs_monetary.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/techs_costs_monetary.csv rename to tests/common/national_scale_from_data_tables/data_tables/techs_costs_monetary.csv diff --git a/tests/common/national_scale_from_data_sources/data_sources/techs_node_constraints.csv b/tests/common/national_scale_from_data_tables/data_tables/techs_node_constraints.csv similarity index 100% rename from tests/common/national_scale_from_data_sources/data_sources/techs_node_constraints.csv rename to tests/common/national_scale_from_data_tables/data_tables/techs_node_constraints.csv diff 
diff --git a/tests/common/national_scale_from_data_sources/model.yaml b/tests/common/national_scale_from_data_tables/model.yaml
similarity index 76%
rename from tests/common/national_scale_from_data_sources/model.yaml
rename to tests/common/national_scale_from_data_tables/model.yaml
index eec3b0c55..e9062f843 100644
--- a/tests/common/national_scale_from_data_sources/model.yaml
+++ b/tests/common/national_scale_from_data_tables/model.yaml
@@ -21,35 +21,35 @@ nodes:
   region1_2.techs: {csp}
   region1_3.techs: {csp}

-data_sources:
+data_tables:
   dimensionless_params:
-    source: data_sources/dimensionless_params.csv
+    data: data_tables/dimensionless_params.csv
     rows: parameters

   costs_params:
-    source: data_sources/costs_params.csv
+    data: data_tables/costs_params.csv
     rows: costs
     columns: parameters

   nodes_base_info:
-    source: data_sources/nodes_base_info.csv
+    data: data_tables/nodes_base_info.csv
     rows: nodes
     columns: parameters

   techs_carriers_at_nodes:
-    source: data_sources/techs_carriers.csv
+    data: data_tables/techs_carriers.csv
     rows: techs
     columns: parameters
     add_dims:
       carriers: power

   links:
-    source: data_sources/links.csv
+    data: data_tables/links.csv
     rows: techs
     columns: parameters

   techs_costs_monetary:
-    source: data_sources/techs_costs_monetary.csv
+    data: data_tables/techs_costs_monetary.csv
     rows: techs
     columns: parameters
     add_dims:
@@ -57,22 +57,22 @@ data_sources:
   # will be loaded from the example model directory in calliope source code.
   time_varying_data_from_df:
-    source: time_varying_df
+    data: time_varying_df
     rows: timesteps
     columns: [comment, nodes, techs, parameters]
     drop: comment

   techs_base_info:
-    source: data_sources/techs_base_info.csv
+    data: data_tables/techs_base_info.csv
     rows: techs
     columns: parameters

   techs_constraints:
-    source: data_sources/techs_constraints.csv
+    data: data_tables/techs_constraints.csv
     rows: techs
     columns: parameters

   techs_node_constraints:
-    source: data_sources/techs_node_constraints.csv
+    data: data_tables/techs_node_constraints.csv
     rows: [nodes, techs]
-    columns: parameters
\ No newline at end of file
+    columns: parameters
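A sketch (not part of the patch) of pairing the `time_varying_data_from_df` table above with an in-memory DataFrame. The `data_table_dfs` kwarg and the `time_varying_df` reference name come from this diff (see tests/common/util.py further down); the table shape and parameter/tech names are hypothetical:

import pandas as pd

import calliope

# Columns follow the declared multi-index: [comment, nodes, techs, parameters];
# rows are timesteps, as in the YAML above.
columns = pd.MultiIndex.from_tuples(
    [("a comment", "region1", "demand_power", "sink_use_equals")],
    names=["comment", "nodes", "techs", "parameters"],
)
df = pd.DataFrame(
    [[10.0], [12.0]],
    index=pd.to_datetime(["2005-01-01 00:00", "2005-01-01 01:00"]).rename("timesteps"),
    columns=columns,
)
model = calliope.Model(
    "tests/common/national_scale_from_data_tables/model.yaml",
    data_table_dfs={"time_varying_df": df},
)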
diff --git a/tests/common/test_model/data_sources/cluster_days.csv b/tests/common/test_model/data_tables/cluster_days.csv
similarity index 100%
rename from tests/common/test_model/data_sources/cluster_days.csv
rename to tests/common/test_model/data_tables/cluster_days.csv
diff --git a/tests/common/test_model/data_sources/cluster_days_diff_dateformat.csv b/tests/common/test_model/data_tables/cluster_days_diff_dateformat.csv
similarity index 100%
rename from tests/common/test_model/data_sources/cluster_days_diff_dateformat.csv
rename to tests/common/test_model/data_tables/cluster_days_diff_dateformat.csv
diff --git a/tests/common/test_model/data_sources/demand_elec.csv b/tests/common/test_model/data_tables/demand_elec.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_elec.csv
rename to tests/common/test_model/data_tables/demand_elec.csv
diff --git a/tests/common/test_model/data_sources/demand_elec_15T_to_2h.csv b/tests/common/test_model/data_tables/demand_elec_15T_to_2h.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_elec_15T_to_2h.csv
rename to tests/common/test_model/data_tables/demand_elec_15T_to_2h.csv
diff --git a/tests/common/test_model/data_sources/demand_elec_15mins.csv b/tests/common/test_model/data_tables/demand_elec_15mins.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_elec_15mins.csv
rename to tests/common/test_model/data_tables/demand_elec_15mins.csv
diff --git a/tests/common/test_model/data_sources/demand_heat.csv b/tests/common/test_model/data_tables/demand_heat.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_heat.csv
rename to tests/common/test_model/data_tables/demand_heat.csv
diff --git a/tests/common/test_model/data_sources/demand_heat_diff_dateformat.csv b/tests/common/test_model/data_tables/demand_heat_diff_dateformat.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_heat_diff_dateformat.csv
rename to tests/common/test_model/data_tables/demand_heat_diff_dateformat.csv
diff --git a/tests/common/test_model/data_sources/demand_heat_wrong_dateformat.csv b/tests/common/test_model/data_tables/demand_heat_wrong_dateformat.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_heat_wrong_dateformat.csv
rename to tests/common/test_model/data_tables/demand_heat_wrong_dateformat.csv
diff --git a/tests/common/test_model/data_sources/demand_heat_wrong_length.csv b/tests/common/test_model/data_tables/demand_heat_wrong_length.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_heat_wrong_length.csv
rename to tests/common/test_model/data_tables/demand_heat_wrong_length.csv
diff --git a/tests/common/test_model/data_sources/demand_simple.csv b/tests/common/test_model/data_tables/demand_simple.csv
similarity index 100%
rename from tests/common/test_model/data_sources/demand_simple.csv
rename to tests/common/test_model/data_tables/demand_simple.csv
diff --git a/tests/common/test_model/data_sources/supply_plus_resource.csv b/tests/common/test_model/data_tables/supply_plus_resource.csv
similarity index 100%
rename from tests/common/test_model/data_sources/supply_plus_resource.csv
rename to tests/common/test_model/data_tables/supply_plus_resource.csv
diff --git a/tests/common/test_model/data_sources/supply_simple.csv b/tests/common/test_model/data_tables/supply_simple.csv
similarity index 100%
rename from tests/common/test_model/data_sources/supply_simple.csv
rename to tests/common/test_model/data_tables/supply_simple.csv
diff --git a/tests/common/test_model/model.yaml b/tests/common/test_model/model.yaml
index aa9056805..9eef09c52 100644
--- a/tests/common/test_model/model.yaml
+++ b/tests/common/test_model/model.yaml
@@ -23,9 +23,9 @@ config:
 parameters:
   bigM: 1e3

-data_sources:
+data_tables:
   demand_elec:
-    source: data_sources/demand_elec.csv
+    data: data_tables/demand_elec.csv
     rows: timesteps
     columns: nodes
     add_dims:
diff --git a/tests/common/test_model/scenarios.yaml b/tests/common/test_model/scenarios.yaml
index f0f7f78b3..f15315117 100644
--- a/tests/common/test_model/scenarios.yaml
+++ b/tests/common/test_model/scenarios.yaml
@@ -29,9 +29,9 @@ overrides:
       test_supply_elec:

   simple_supply_plus: # does not have a solution
-    data_sources:
+    data_tables:
       supply_plus_resource:
-        source: data_sources/supply_plus_resource.csv
+        data: data_tables/supply_plus_resource.csv
         rows: timesteps
         columns: nodes
         add_dims:
@@ -39,9 +39,9 @@ overrides:
           techs: test_supply_plus

   simple_supply_and_supply_plus:
-    data_sources:
+    data_tables:
       supply_plus_resource:
-        source: data_sources/supply_plus_resource.csv
+        data: data_tables/supply_plus_resource.csv
         rows: timesteps
         columns: nodes
         add_dims:
@@ -53,9 +53,9 @@ overrides:
         b.techs.test_supply_elec:

   supply_and_supply_plus_milp:
-    data_sources:
+    data_tables:
       supply_plus_resource:
-        source: data_sources/supply_plus_resource.csv
+        data: data_tables/supply_plus_resource.csv
         rows: timesteps
         columns: nodes
         select:
@@ -134,9 +134,9 @@ overrides:
       test_supply_elec:

   simple_conversion:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         add_dims:
@@ -157,9 +157,9 @@ overrides:
       test_conversion:

   conversion_and_conversion_plus:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -180,9 +180,9 @@ overrides:
     templates.test_transmission.active: false

   conversion_plus_milp:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -209,9 +209,9 @@ overrides:
     templates.test_transmission.active: false

   conversion_milp:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -234,9 +234,9 @@ overrides:
     templates.test_transmission.active: false

   conversion_plus_purchase:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -260,9 +260,9 @@ overrides:
     templates.test_transmission.active: false

   simple_conversion_plus:
-    data_sources:
+    data_tables:
      demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -281,9 +281,9 @@ overrides:
     templates.test_transmission.active: false

   simple_chp:
-    data_sources:
+    data_tables:
       demand_heat:
-        source: data_sources/demand_heat.csv
+        data: data_tables/demand_heat.csv
         rows: timesteps
         columns: nodes
         select:
@@ -440,7 +440,7 @@ overrides:
           dims: costs

   demand_elec_max:
-    data_sources:
+    data_tables:
       demand_elec:
         add_dims:
           parameters: sink_use_max
diff --git a/tests/common/util.py b/tests/common/util.py
index f426ea89e..8ae70da83 100644
--- a/tests/common/util.py
+++ b/tests/common/util.py
@@ -2,23 +2,25 @@
 from pathlib import Path
 from typing import Literal

-import calliope
 import xarray as xr
-from calliope import backend
+
+import calliope
+import calliope.backend
+import calliope.preprocess


 def build_test_model(
     override_dict=None,
     scenario=None,
     model_file="model.yaml",
-    data_source_dfs=None,
+    data_table_dfs=None,
     **init_kwargs,
 ):
     return calliope.Model(
         os.path.join(os.path.dirname(__file__), "test_model", model_file),
         override_dict=override_dict,
         scenario=scenario,
-        data_source_dfs=data_source_dfs,
+        data_table_dfs=data_table_dfs,
         **init_kwargs,
     )

@@ -79,9 +81,9 @@ def check_variable_exists(

 def build_lp(
     model: calliope.Model,
     outfile: str | Path,
-    math: dict[str, dict | list] | None = None,
+    math_data: dict[str, dict | list] | None = None,
     backend_name: Literal["pyomo"] = "pyomo",
-) -> "backend.BackendModel":
+) -> "calliope.backend.backend_model.BackendModel":
     """
     Write a barebones LP file with which to compare in tests.
     All model parameters and variables will be loaded automatically, as well as a dummy objective if one isn't provided as part of `math`.
@@ -93,41 +95,43 @@ Args:
         math (dict | None, optional): All constraint/global expression/objective math to apply. Defaults to None.
         backend_name (Literal["pyomo"], optional): Backend to use to create the LP file. Defaults to "pyomo".
""" - backend_instance = backend.get_model_backend(backend_name, model._model_data) - - for name, dict_ in model.math["variables"].items(): - backend_instance.add_variable(name, dict_) - for name, dict_ in model.math["global_expressions"].items(): - backend_instance.add_global_expression(name, dict_) + math = calliope.preprocess.CalliopeMath( + ["plan", *model.config.build.get("add_math", [])] + ) - if isinstance(math, dict): - for component_group, component_math in math.items(): - component = component_group.removesuffix("s") + math_to_add = calliope.AttrDict() + if isinstance(math_data, dict): + for component_group, component_math in math_data.items(): if isinstance(component_math, dict): - for name, dict_ in component_math.items(): - getattr(backend_instance, f"add_{component}")(name, dict_) + math_to_add.union(calliope.AttrDict({component_group: component_math})) elif isinstance(component_math, list): for name in component_math: - dict_ = model.math[component_group][name] - getattr(backend_instance, f"add_{component}")(name, dict_) - - # MUST have an objective for a valid LP file - if math is None or "objectives" not in math.keys(): - backend_instance.add_objective( - "dummy_obj", {"equations": [{"expression": "1 + 1"}], "sense": "minimize"} - ) - backend_instance._instance.objectives["dummy_obj"][0].activate() - elif "objectives" in math.keys(): - if isinstance(math["objectives"], dict): - objectives = list(math["objectives"].keys()) - else: - objectives = math["objectives"] - assert len(objectives) == 1, "Can only test with one objective" - backend_instance._instance.objectives[objectives[0]][0].activate() + math_to_add.set_key( + f"{component_group}.{name}", math.data[component_group][name] + ) + if math_data is None or "objectives" not in math_to_add.keys(): + obj = { + "dummy_obj": {"equations": [{"expression": "1 + 1"}], "sense": "minimize"} + } + math_to_add.union(calliope.AttrDict({"objectives": obj})) + obj_to_activate = "dummy_obj" + else: + obj_to_activate = list(math_to_add["objectives"].keys())[0] + del math.data["constraints"] + del math.data["objectives"] + math.add(math_to_add) + + model.build( + add_math_dict=math.data, + ignore_mode_math=True, + objective=obj_to_activate, + add_math=[], + pre_validate_math_strings=False, + ) - backend_instance.verbose_strings() + model.backend.verbose_strings() - backend_instance.to_lp(str(outfile)) + model.backend.to_lp(str(outfile)) # strip trailing whitespace from `outfile` after the fact, # so it can be reliably compared other files in future @@ -138,4 +142,4 @@ def build_lp( # reintroduce the trailing newline since both Pyomo and file formatters love them. 
diff --git a/tests/conftest.py b/tests/conftest.py
index b2c4d5f51..3d4694c53 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -4,8 +4,10 @@
 import numpy as np
 import pytest
 import xarray as xr
+
 from calliope.attrdict import AttrDict
 from calliope.backend import latex_backend_model, pyomo_backend_model
+from calliope.preprocess import CalliopeMath
 from calliope.util.schema import CONFIG_SCHEMA, MODEL_SCHEMA, extract_from_schema

 from .common.util import build_test_model as build_model
@@ -41,7 +43,7 @@ def model_defaults():

 @pytest.fixture(scope="session")
 def data_source_dir():
-    return Path(__file__).parent / "common" / "test_model" / "data_sources"
+    return Path(__file__).parent / "common" / "test_model" / "data_tables"


 @pytest.fixture(scope="session")
@@ -52,7 +54,7 @@ def simple_supply():
     return m


-@pytest.fixture()
+@pytest.fixture
 def simple_supply_build_func():
     m = build_model({}, "simple_supply,two_hours,investment_costs")
     m.build()
@@ -155,6 +157,20 @@ def simple_conversion_plus():
     return m


+@pytest.fixture(scope="module")
+def dummy_model_math():
+    math = {
+        "data": {
+            "constraints": {},
+            "variables": {},
+            "global_expressions": {},
+            "objectives": {},
+        },
+        "history": [],
+    }
+    return CalliopeMath.from_dict(math)
+
+
 @pytest.fixture(scope="module")
 def dummy_model_data(config_defaults, model_defaults):
     coords = {
@@ -241,10 +257,6 @@ def dummy_model_data(config_defaults, model_defaults):
             ["nodes", "techs"],
             [[False, False, False, False], [False, False, False, True]],
         ),
-        "primary_carrier_out": (
-            ["carriers", "techs"],
-            [[1.0, np.nan, 1.0, np.nan], [np.nan, 1.0, np.nan, np.nan]],
-        ),
         "lookup_techs": (["techs"], ["foobar", np.nan, "foobaz", np.nan]),
         "lookup_techs_no_match": (["techs"], ["foo", np.nan, "bar", np.nan]),
         "lookup_multi_dim_nodes": (
@@ -288,16 +300,18 @@ def dummy_model_data(config_defaults, model_defaults):
             "all_nan": np.nan,
             "with_inf": 100,
             "only_techs": 5,
+            "no_dims": 0,
             **model_defaults,
         }
     )

-    model_data.attrs["math"] = AttrDict(
-        {"constraints": {}, "variables": {}, "global_expressions": {}, "objectives": {}}
-    )
+    # This value is set on the parameter directly to ensure it finds its way through to the LaTeX math.
+    model_data.no_dims.attrs["default"] = 0
+
     return model_data


 def populate_backend_model(backend):
+    backend._add_all_inputs_as_parameters()
     backend.add_variable(
         "multi_dim_var",
         {
@@ -330,18 +344,20 @@


 @pytest.fixture(scope="module")
-def dummy_pyomo_backend_model(dummy_model_data):
-    backend = pyomo_backend_model.PyomoBackendModel(dummy_model_data)
+def dummy_pyomo_backend_model(dummy_model_data, dummy_model_math):
+    backend = pyomo_backend_model.PyomoBackendModel(dummy_model_data, dummy_model_math)
     return populate_backend_model(backend)


 @pytest.fixture(scope="module")
-def dummy_latex_backend_model(dummy_model_data):
-    backend = latex_backend_model.LatexBackendModel(dummy_model_data)
+def dummy_latex_backend_model(dummy_model_data, dummy_model_math):
+    backend = latex_backend_model.LatexBackendModel(dummy_model_data, dummy_model_math)
     return populate_backend_model(backend)


 @pytest.fixture(scope="class")
-def valid_latex_backend(dummy_model_data):
-    backend = latex_backend_model.LatexBackendModel(dummy_model_data, include="valid")
+def valid_latex_backend(dummy_model_data, dummy_model_math):
+    backend = latex_backend_model.LatexBackendModel(
+        dummy_model_data, dummy_model_math, include="valid"
+    )
     return populate_backend_model(backend)
diff --git a/tests/test_backend_expression_parser.py b/tests/test_backend_expression_parser.py
index e68f24f05..ded1feb93 100644
--- a/tests/test_backend_expression_parser.py
+++ b/tests/test_backend_expression_parser.py
@@ -6,6 +6,7 @@
 import pyparsing as pp
 import pytest
 import xarray as xr
+
 from calliope import exceptions
 from calliope.backend import expression_parser, helper_functions

@@ -36,40 +37,40 @@ def as_array(self, x, y):
         return x * 10 + y


-@pytest.fixture()
+@pytest.fixture
 def valid_component_names():
     return ["foo", "with_inf", "only_techs", "no_dims", "multi_dim_var", "no_dim_var"]


-@pytest.fixture()
+@pytest.fixture
 def base_parser_elements():
     number, identifier = expression_parser.setup_base_parser_elements()
     return number, identifier


-@pytest.fixture()
+@pytest.fixture
 def number(base_parser_elements):
     return base_parser_elements[0]


-@pytest.fixture()
+@pytest.fixture
 def identifier(base_parser_elements):
     return base_parser_elements[1]


-@pytest.fixture()
+@pytest.fixture
 def evaluatable_identifier(identifier, valid_component_names):
     return expression_parser.evaluatable_identifier_parser(
         identifier, valid_component_names
     )


-@pytest.fixture()
+@pytest.fixture
 def id_list(number, evaluatable_identifier):
     return expression_parser.list_parser(number, evaluatable_identifier)


-@pytest.fixture()
+@pytest.fixture
 def unsliced_param():
     def _unsliced_param(valid_component_names):
         return expression_parser.unsliced_object_parser(valid_component_names)
@@ -77,12 +78,12 @@ def _unsliced_param(valid_component_names):
     return _unsliced_param


-@pytest.fixture()
+@pytest.fixture
 def unsliced_param_with_obj_names(unsliced_param, valid_component_names):
     return unsliced_param(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def sliced_param(
     number, identifier, evaluatable_identifier, unsliced_param_with_obj_names
 ):
@@ -91,12 +92,12 @@ def sliced_param(
     )


-@pytest.fixture()
+@pytest.fixture
 def sub_expression(identifier):
     return expression_parser.sub_expression_parser(identifier)


-@pytest.fixture()
+@pytest.fixture
 def helper_function(
     number,
     sliced_param,
@@ -116,7 +117,7 @@
     )


-@pytest.fixture()
+@pytest.fixture
 def helper_function_no_nesting(
     number,
     sliced_param,
@@ -157,7 +158,7 @@ def helper_function_one_parser_in_args(identifier, request):
     )


-@pytest.fixture()
+@pytest.fixture
 def eval_kwargs(dummy_pyomo_backend_model):
     return {
         "helper_functions": helper_functions._registry["expression"],
@@ -172,7 +173,7 @@
     }


-@pytest.fixture()
+@pytest.fixture
 def arithmetic(
     helper_function, number, sliced_param, sub_expression, unsliced_param_with_obj_names
 ):
@@ -185,7 +186,7 @@
     )


-@pytest.fixture()
+@pytest.fixture
 def helper_function_allow_arithmetic(
     number,
     sliced_param,
@@ -209,22 +210,22 @@
     )


-@pytest.fixture()
+@pytest.fixture
 def equation_comparison(arithmetic):
     return expression_parser.equation_comparison_parser(arithmetic)


-@pytest.fixture()
+@pytest.fixture
 def generate_equation(valid_component_names):
     return expression_parser.generate_equation_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def generate_slice(valid_component_names):
     return expression_parser.generate_slice_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def generate_sub_expression(valid_component_names):
     return expression_parser.generate_sub_expression_parser(valid_component_names)

@@ -758,11 +759,11 @@ def var_left(self, request):
     def var_right(self, request):
         return request.param

-    @pytest.fixture()
+    @pytest.fixture
     def expected_left(self, var_left):
         return self.EXPR_PARAMS_AND_EXPECTED_EVAL[var_left]

-    @pytest.fixture()
+    @pytest.fixture
     def expected_right(self, var_right):
         return self.EXPR_PARAMS_AND_EXPECTED_EVAL[var_right]

@@ -770,7 +771,7 @@ def expected_right(self, var_right):
     def operator(self, request):
         return request.param

-    @pytest.fixture()
+    @pytest.fixture
     def single_equation_simple(self, var_left, var_right, operator):
         return f"{var_left} {operator} {var_right}"

@@ -849,7 +850,7 @@ def test_repr(self, equation_comparison):


 class TestAsMathString:
-    @pytest.fixture()
+    @pytest.fixture
     def latex_eval_kwargs(self, dummy_latex_backend_model):
         return {
             "helper_functions": helper_functions._registry["expression"],
diff --git a/tests/test_backend_general.py b/tests/test_backend_general.py
index 506055fe6..8b42fa702 100644
--- a/tests/test_backend_general.py
+++ b/tests/test_backend_general.py
@@ -1,11 +1,12 @@
 import logging

-import calliope
 import numpy as np
 import pandas as pd
 import pytest  # noqa: F401
 import xarray as xr

+import calliope
+
 from .common.util import build_test_model as build_model
 from .common.util import check_error_or_warning

@@ -25,18 +26,18 @@ def built_model_cls_longnames(backend) -> calliope.Model:
     return m


-@pytest.fixture()
+@pytest.fixture
 def built_model_func_longnames(backend) -> calliope.Model:
     m = build_model({}, "simple_supply,two_hours,investment_costs")
-    m.build(backend=backend)
+    m.build(backend=backend, pre_validate_math_strings=False)
     m.backend.verbose_strings()
     return m


-@pytest.fixture()
+@pytest.fixture
 def solved_model_func(backend) -> calliope.Model:
     m = build_model({}, "simple_supply,two_hours,investment_costs")
-    m.build(backend=backend)
+    m.build(backend=backend, pre_validate_math_strings=False)
     m.solve()
     return m

@@ -65,10 +66,10 @@ def solved_model_cls(backend) -> calliope.Model:
     return m


-@pytest.fixture()
+@pytest.fixture
 def built_model_func_updated_cost_flow_cap(backend, dummy_int: int) -> calliope.Model:
     m = build_model({}, "simple_supply,two_hours,investment_costs")
-    m.build(backend=backend)
+    m.build(backend=backend, pre_validate_math_strings=False)
     m.backend.verbose_strings()
     m.backend.update_parameter("cost_flow_cap", dummy_int)
     return m

diff --git a/tests/test_backend_gurobi.py b/tests/test_backend_gurobi.py
index 33dd2fa83..d7c1beff7 100755
--- a/tests/test_backend_gurobi.py
+++ b/tests/test_backend_gurobi.py
@@ -1,8 +1,9 @@
-import calliope.exceptions as exceptions
 import gurobipy
 import pytest  # noqa: F401
 import xarray as xr

+import calliope.exceptions as exceptions
+
 from .common.util import build_test_model as build_model
 from .common.util import check_error_or_warning

@@ -24,10 +25,10 @@ def simple_supply_gurobi(self):
         m.solve()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply_gurobi_func(self):
         m = build_model({}, "simple_supply,two_hours,investment_costs")
-        m.build(backend="gurobi")
+        m.build(backend="gurobi", pre_validate_math_strings=False)
         m.solve()
         return m

@@ -260,13 +261,13 @@ def test_to_lp_wrong_file_extension(self, simple_supply_gurobi, tmp_path):


 class TestShadowPrices:
-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply(self):
         m = build_model({}, "simple_supply,two_hours,investment_costs")
         m.build(backend="gurobi")
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def supply_milp(self):
         m = build_model({}, "supply_milp,two_hours,investment_costs")
         m.build(backend="gurobi")
diff --git a/tests/test_backend_helper_functions.py b/tests/test_backend_helper_functions.py
index 59541a063..9f696927f 100644
--- a/tests/test_backend_helper_functions.py
+++ b/tests/test_backend_helper_functions.py
@@ -1,6 +1,7 @@
 import numpy as np
 import pytest
 import xarray as xr
+
 from calliope import exceptions
 from calliope.backend import helper_functions

@@ -71,7 +72,7 @@ def parsing_kwargs(self, dummy_model_data):
             "return_type": "array",
         }

-    @pytest.fixture()
+    @pytest.fixture
     def is_defined_any(self, dummy_model_data):
         def _is_defined(drop_dims, dims):
             return (
@@ -82,7 +83,7 @@ def _is_defined(drop_dims, dims):

         return _is_defined

-    @pytest.fixture()
+    @pytest.fixture
     def is_defined_all(self, dummy_model_data):
         def _is_defined(drop_dims, dims):
             return (
diff --git a/tests/test_backend_latex_backend.py b/tests/test_backend_latex_backend.py
index 7390dae06..e28b0830a 100644
--- a/tests/test_backend_latex_backend.py
+++ b/tests/test_backend_latex_backend.py
@@ -1,70 +1,12 @@
 import textwrap
-from pathlib import Path

 import pytest
 import xarray as xr
+
 from calliope import exceptions
 from calliope.backend import latex_backend_model

-from .common.util import build_test_model, check_error_or_warning
-
-
-class TestMathDocumentation:
-    @pytest.fixture(scope="class")
-    def no_build(self):
-        return build_test_model({}, "simple_supply,two_hours,investment_costs")
-
-    @pytest.fixture(scope="class")
-    def build_all(self):
-        model = build_test_model({}, "simple_supply,two_hours,investment_costs")
-        model.math_documentation.build(include="all")
-        return model
-
-    @pytest.fixture(scope="class")
-    def build_valid(self):
-        model = build_test_model({}, "simple_supply,two_hours,investment_costs")
-        model.math_documentation.build(include="valid")
-        return model
-
-    def test_write_before_build(self, no_build, tmpdir_factory):
-        filepath = tmpdir_factory.mktemp("custom_math").join("foo.tex")
-        with pytest.raises(exceptions.ModelError) as excinfo:
-            no_build.math_documentation.write(filepath)
-        assert check_error_or_warning(
-            excinfo, "Build the documentation (`build`) before trying to write it"
-        )
-
-    @pytest.mark.parametrize(
-        ("format", "startswith"),
-        [
-            ("tex", "\n\\documentclass{article}"),
-            ("rst", "\nObjective"),
-            ("md", "\n## Objective"),
-        ],
-    )
-    @pytest.mark.parametrize("include", ["build_all", "build_valid"])
-    def test_string_return(self, request, format, startswith, include):
-        model = request.getfixturevalue(include)
-        string_math = model.math_documentation.write(format=format)
-        assert string_math.startswith(startswith)
-
-    def test_to_file(self, build_all, tmpdir_factory):
-        filepath = tmpdir_factory.mktemp("custom_math").join("custom-math.tex")
-        build_all.math_documentation.write(filename=filepath)
-        assert Path(filepath).exists()
-
-    @pytest.mark.parametrize(
-        ("filepath", "format"),
-        [(None, "foo"), ("myfile.foo", None), ("myfile.tex", "foo")],
-    )
-    def test_invalid_format(self, build_all, tmpdir_factory, filepath, format):
-        if filepath is not None:
-            filepath = tmpdir_factory.mktemp("custom_math").join(filepath)
-        with pytest.raises(ValueError) as excinfo:  # noqa: PT011
-            build_all.math_documentation.write(filename="foo", format=format)
-        assert check_error_or_warning(
-            excinfo, "Math documentation format must be one of"
-        )
+from .common.util import check_error_or_warning


 class TestLatexBackendModel:
@@ -382,6 +324,8 @@ def test_create_obj_list(self, dummy_latex_backend_model):
                 \begin{itemize}
                     \item expr
                 \end{itemize}
+
+                \textbf{Default}: 0
                 \end{document}"""
             ),
         ),
@@ -420,6 +364,8 @@ def test_create_obj_list(self, dummy_latex_backend_model):
                 **Used in**:

                 * expr
+
+                **Default**: 0
                 """
             ),
         ),
@@ -453,13 +399,20 @@ def test_create_obj_list(self, dummy_latex_backend_model):
                 **Used in**:

                 * [expr](#expr)
+
+                **Default**: 0
                 """
             ),
         ),
     ],
 )
-    def test_generate_math_doc(self, dummy_model_data, format, expected):
-        backend_model = latex_backend_model.LatexBackendModel(dummy_model_data)
+    def test_generate_math_doc(
+        self, dummy_model_data, dummy_model_math, format, expected
+    ):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )
+        backend_model._add_all_inputs_as_parameters()
         backend_model.add_global_expression(
             "expr",
             {
@@ -471,8 +424,10 @@ def test_generate_math_doc(self, dummy_model_data, format, expected):
         doc = backend_model.generate_math_doc(format=format)
         assert doc == expected

-    def test_generate_math_doc_no_params(self, dummy_model_data):
-        backend_model = latex_backend_model.LatexBackendModel(dummy_model_data)
+    def test_generate_math_doc_no_params(self, dummy_model_data, dummy_model_math):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )
         backend_model.add_global_expression(
             "expr",
             {
@@ -501,8 +456,12 @@ def test_generate_math_doc_no_params(self, dummy_model_data):
             """
         )

-    def test_generate_math_doc_mkdocs_features_tabs(self, dummy_model_data):
-        backend_model = latex_backend_model.LatexBackendModel(dummy_model_data)
+    def test_generate_math_doc_mkdocs_features_tabs(
+        self, dummy_model_data, dummy_model_math
+    ):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )
         backend_model.add_global_expression(
             "expr",
             {
@@ -540,8 +499,13 @@ def test_generate_math_doc_mkdocs_features_tabs(self, dummy_model_data):
             """
         )

-    def test_generate_math_doc_mkdocs_features_admonition(self, dummy_model_data):
-        backend_model = latex_backend_model.LatexBackendModel(dummy_model_data)
+    def test_generate_math_doc_mkdocs_features_admonition(
+        self, dummy_model_data, dummy_model_math
+    ):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )
+        backend_model._add_all_inputs_as_parameters()
         backend_model.add_global_expression(
             "expr",
             {
@@ -588,11 +552,17 @@ def test_generate_math_doc_mkdocs_features_admonition(self, dummy_model_data):
         ??? info "Used in"

             * [expr](#expr)
+
+        **Default**: 0
         """
     )

-    def test_generate_math_doc_mkdocs_features_not_in_md(self, dummy_model_data):
-        backend_model = latex_backend_model.LatexBackendModel(dummy_model_data)
+    def test_generate_math_doc_mkdocs_features_not_in_md(
+        self, dummy_model_data, dummy_model_math
+    ):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )

         with pytest.raises(exceptions.ModelError) as excinfo:
             backend_model.generate_math_doc(format="rst", mkdocs_features=True)
@@ -708,3 +678,56 @@ def test_get_variable_bounds_string(self, dummy_latex_backend_model):
             "expression": r"\textbf{multi_dim_var}_\text{node,tech} \leq 2\mathord{\times}10^{+06}"
         }
         assert refs == {"multi_dim_var"}
+
+    def test_param_type(self, dummy_model_data, dummy_model_math):
+        backend_model = latex_backend_model.LatexBackendModel(
+            dummy_model_data, dummy_model_math
+        )
+        backend_model._add_all_inputs_as_parameters()
+        backend_model.add_global_expression(
+            "expr",
+            {
+                "equations": [{"expression": "1 + flow_cap_max"}],
+                "description": "foobar",
+                "default": 0,
+            },
+        )
+        doc = backend_model.generate_math_doc(format="md")
+        assert doc == textwrap.dedent(
+            r"""
+
+            ## Where
+
+            ### expr
+
+            foobar
+
+            **Uses**:
+
+            * [flow_cap_max](#flow_cap_max)
+
+            **Default**: 0
+
+            $$
+            \begin{array}{l}
+                \quad 1 + \textit{flow\_cap\_max}\\
+            \end{array}
+            $$
+
+            ## Parameters
+
+            ### flow_cap_max
+
+            Limits `flow_cap` to a maximum.
+
+            **Used in**:
+
+            * [expr](#expr)
+
+            **Unit**: power.
+
+            **Default**: inf
+
+            **Type**: float
+            """
+        )
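A possible end-to-end sketch (not part of the patch) of the backend-owned documentation path these tests now exercise. `applied_math` is the model attribute used by tests/test_backend_module.py just below; the private calls mirror the test setup:

from calliope.backend import latex_backend_model
from tests.common.util import build_test_model

m = build_test_model({}, "simple_supply,two_hours,investment_costs")
m.build()  # populates m.applied_math
backend = latex_backend_model.LatexBackendModel(m._model_data, m.applied_math)
backend._add_all_inputs_as_parameters()
doc = backend.generate_math_doc(format="md")  # "tex", "rst" and "md" all appear in the tests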
diff --git a/tests/test_backend_module.py b/tests/test_backend_module.py
index dd664aee9..f220a3b9e 100644
--- a/tests/test_backend_module.py
+++ b/tests/test_backend_module.py
@@ -1,20 +1,25 @@
 """Test backend module functionality (`__init__.py`)."""

 import pytest
+
 from calliope import backend
 from calliope.backend.backend_model import BackendModel
 from calliope.exceptions import BackendError


-@pytest.mark.parametrize("valid_backend", backend.MODEL_BACKENDS)
+@pytest.mark.parametrize("valid_backend", ["pyomo", "gurobi"])
 def test_valid_model_backend(simple_supply, valid_backend):
     """Requesting a valid model backend must result in a backend instance."""
-    backend_obj = backend.get_model_backend(valid_backend, simple_supply._model_data)
+    backend_obj = backend.get_model_backend(
+        valid_backend, simple_supply._model_data, simple_supply.applied_math
+    )
     assert isinstance(backend_obj, BackendModel)


 @pytest.mark.parametrize("spam", ["not_real", None, True, 1])
-def test_invalid_model_backend(spam):
+def test_invalid_model_backend(spam, simple_supply):
     """Backend requests should catch invalid setups."""
     with pytest.raises(BackendError):
-        backend.get_model_backend(spam, None)
+        backend.get_model_backend(
+            spam, simple_supply._model_data, simple_supply.applied_math
+        )
diff --git a/tests/test_backend_parsing.py b/tests/test_backend_parsing.py
index fca9ea41d..8847738c8 100644
--- a/tests/test_backend_parsing.py
+++ b/tests/test_backend_parsing.py
@@ -2,11 +2,12 @@
 from io import StringIO
 from unittest.mock import patch

-import calliope
 import pyparsing as pp
 import pytest
 import ruamel.yaml as yaml
 import xarray as xr
+
+import calliope
 from calliope.backend import backend_model, expression_parser, parsing, where_parser

 from .common.util import check_error_or_warning
@@ -19,7 +20,7 @@ def string_to_dict(yaml_string):
     return yaml_loader.load(StringIO(yaml_string))


-@pytest.fixture()
+@pytest.fixture
 def component_obj():
     setup_string = """
     foreach: [A, A1]
@@ -31,43 +32,43 @@ def component_obj():
     return parsing.ParsedBackendComponent("constraints", "foo", variable_data)


-@pytest.fixture()
+@pytest.fixture
 def exists_array(component_obj, dummy_model_data):
     component_obj.sets = ["nodes", "techs"]
     return component_obj.combine_definition_matrix_and_foreach(dummy_model_data)


-@pytest.fixture()
+@pytest.fixture
 def valid_component_names(dummy_model_data):
     return ["foo", "bar", "baz", "foobar", *dummy_model_data.data_vars.keys()]


-@pytest.fixture()
+@pytest.fixture
 def expression_string_parser(valid_component_names):
     return expression_parser.generate_equation_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def arithmetic_string_parser(valid_component_names):
     return expression_parser.generate_arithmetic_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def slice_parser(valid_component_names):
     return expression_parser.generate_slice_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def sub_expression_parser(valid_component_names):
     return expression_parser.generate_sub_expression_parser(valid_component_names)


-@pytest.fixture()
+@pytest.fixture
 def where_string_parser():
     return where_parser.generate_where_string_parser()


-@pytest.fixture()
+@pytest.fixture
 def expression_generator():
     def _expression_generator(parse_string, where_string=None):
         expression_dict = {"expression": parse_string}
@@ -78,7 +79,7 @@ def _expression_generator(parse_string, where_string=None):
     return _expression_generator


-@pytest.fixture()
+@pytest.fixture
 def generate_expression_list(component_obj, expression_string_parser):
     def _generate_expression_list(expression_list, **kwargs):
         return component_obj.generate_expression_list(
@@ -99,7 +100,7 @@ def parse_sub_expressions_and_slices(
     }


-@pytest.fixture()
+@pytest.fixture
 def parsed_sub_expression_dict(component_obj, sub_expression_parser):
     def _parsed_sub_expression_dict(n_foo, n_bar):
         foos = ", ".join(
@@ -122,7 +123,7 @@ def _parsed_sub_expression_dict(n_foo, n_bar):
     return _parsed_sub_expression_dict


-@pytest.fixture()
+@pytest.fixture
 def parsed_slice_dict(component_obj, slice_parser):
     def _parsed_slice_dict(n_tech1, n_tech2):
         techs1 = ", ".join(["{where: techs, expression: foo}" for i in range(n_tech1)])
@@ -141,7 +142,7 @@ def _parsed_slice_dict(n_tech1, n_tech2):
     return _parsed_slice_dict


-@pytest.fixture()
+@pytest.fixture
 def obj_with_sub_expressions_and_slices():
     def _obj_with_sub_expressions_and_slices(equation_string):
         if isinstance(equation_string, str):
@@ -181,7 +182,7 @@ def _obj_with_sub_expressions_and_slices(equation_string):
     return _obj_with_sub_expressions_and_slices


-@pytest.fixture()
+@pytest.fixture
 def equation_obj(expression_string_parser, where_string_parser):
     return parsing.ParsedBackendEquation(
         equation_name="foo",
@@ -191,7 +192,7 @@
     )


-@pytest.fixture()
+@pytest.fixture
 def equation_sub_expression_obj(sub_expression_parser, where_string_parser):
     def _equation_sub_expression_obj(name):
         return parsing.ParsedBackendEquation(
@@ -204,7 +205,7 @@ def _equation_sub_expression_obj(name):
     return _equation_sub_expression_obj


-@pytest.fixture()
+@pytest.fixture
 def equation_slice_obj(slice_parser, where_string_parser):
     def _equation_slice_obj(name):
         return parsing.ParsedBackendEquation(
@@ -217,15 +218,15 @@ def _equation_slice_obj(name):
     return _equation_slice_obj
-@pytest.fixture()
-def dummy_backend_interface(dummy_model_data):
+@pytest.fixture
+def dummy_backend_interface(dummy_model_data, dummy_model_math):
     # ignore the need to define the abstract methods from backend_model.BackendModel
     with patch.multiple(backend_model.BackendModel, __abstractmethods__=set()):

         class DummyBackendModel(backend_model.BackendModel):
             def __init__(self):
                 backend_model.BackendModel.__init__(
-                    self, dummy_model_data, instance=None
+                    self, dummy_model_data, dummy_model_math, instance=None
                 )

                 self._dataset = dummy_model_data.copy(deep=True)
@@ -239,7 +240,7 @@ def __init__(self):
     return DummyBackendModel()


-@pytest.fixture()
+@pytest.fixture
 def evaluatable_component_obj(valid_component_names):
     def _evaluatable_component_obj(equation_expressions):
         setup_string = f"""
@@ -291,7 +292,7 @@ def evaluate_component_where(
     return component_obj, equation_where_aligned, request.param[1]


-@pytest.fixture()
+@pytest.fixture
 def evaluate_component_expression(evaluate_component_where, dummy_backend_interface):
     component_obj, equation_where, n_true = evaluate_component_where

@@ -992,7 +993,7 @@ def test_evaluate_expression(self, evaluate_component_expression):


 class TestParsedConstraint:
-    @pytest.fixture()
+    @pytest.fixture
     def constraint_obj(self):
         dict_ = {
             "foreach": ["techs"],
@@ -1044,7 +1045,7 @@ def test_parse_constraint_dict_evaluate_eq2(


 class TestParsedVariable:
-    @pytest.fixture()
+    @pytest.fixture
     def variable_obj(self):
         dict_ = {"foreach": ["techs"], "where": "False"}

@@ -1066,7 +1067,7 @@ def test_parse_variable_dict_empty_eq1(


 class TestParsedObjective:
-    @pytest.fixture()
+    @pytest.fixture
     def objective_obj(self):
         dict_ = {
             "equations": [
diff --git a/tests/test_backend_pyomo.py b/tests/test_backend_pyomo.py
index 40be58d33..710d147a4 100755
--- a/tests/test_backend_pyomo.py
+++ b/tests/test_backend_pyomo.py
@@ -1,19 +1,19 @@
-import importlib
 import logging
-from copy import deepcopy
 from itertools import product

-import calliope
-import calliope.exceptions as exceptions
 import numpy as np
 import pyomo.core as po
 import pyomo.kernel as pmo
 import pytest  # noqa: F401
 import xarray as xr
-from calliope.attrdict import AttrDict
-from calliope.backend.pyomo_backend_model import PyomoBackendModel
 from pyomo.core.kernel.piecewise_library.transforms import piecewise_sos2

+import calliope
+import calliope.backend
+import calliope.exceptions as exceptions
+import calliope.preprocess
+from calliope.backend import PyomoBackendModel
+
 from .common.util import build_test_model as build_model
 from .common.util import check_error_or_warning, check_variable_exists

@@ -1523,8 +1523,8 @@ def cluster_model(
 ):
     override = {
         "config.init.time_subset": ["2005-01-01", "2005-01-04"],
-        "config.init.time_cluster": "data_sources/cluster_days.csv",
-        "config.init.add_math": (
+        "config.init.time_cluster": "data_tables/cluster_days.csv",
+        "config.build.add_math": (
             ["storage_inter_cluster"] if storage_inter_cluster else []
         ),
         "config.build.cyclic_storage": cyclic,
@@ -1626,63 +1626,38 @@ def simple_supply_updated_cost_flow_cap(
         simple_supply.backend.update_parameter("cost_flow_cap", dummy_int)
         return simple_supply

-    @pytest.fixture()
+    @pytest.fixture
     def temp_path(self, tmpdir_factory):
         return tmpdir_factory.mktemp("custom_math")

-    @pytest.mark.parametrize("mode", ["operate", "spores"])
+    @pytest.mark.parametrize("mode", ["operate", "plan"])
     def test_add_run_mode_custom_math(self, caplog, mode):
         caplog.set_level(logging.DEBUG)
-        mode_custom_math = AttrDict.from_yaml(
-            importlib.resources.files("calliope") / "math" / f"{mode}.yaml"
-        )
         m = build_model({}, "simple_supply,two_hours,investment_costs")
+        math = calliope.preprocess.CalliopeMath([mode])

-        base_math = deepcopy(m.math)
-        base_math.union(mode_custom_math, allow_override=True)
+        backend = PyomoBackendModel(m.inputs, math, mode=mode)

-        backend = PyomoBackendModel(m.inputs, mode=mode)
-        backend._add_run_mode_math()
+        assert backend.math == math

-        assert f"Updating math formulation with {mode} mode math." in caplog.text
-
-        assert m.math != base_math
-        assert backend.inputs.attrs["math"].as_dict() == base_math.as_dict()
-
-    def test_add_run_mode_custom_math_before_build(self, caplog, temp_path):
-        """A user can override the run mode math by including it directly in the additional math list"""
+    def test_add_run_mode_custom_math_before_build(self, caplog):
+        """Run mode math is applied before anything else."""
         caplog.set_level(logging.DEBUG)
-        custom_math = AttrDict({"variables": {"flow_cap": {"active": True}}})
-        file_path = temp_path.join("custom-math.yaml")
-        custom_math.to_yaml(file_path)
+        custom_math = {"constraints": {"force_zero_area_use": {"active": True}}}

         m = build_model(
-            {"config.init.add_math": ["operate", str(file_path)]},
+            {
+                "config.build.operate_window": "12H",
+                "config.build.operate_horizon": "12H",
+            },
             "simple_supply,two_hours,investment_costs",
         )

-        backend = PyomoBackendModel(m.inputs, mode="operate")
-        backend._add_run_mode_math()
-
-        # We set operate mode explicitly in our additional math so it won't be added again
-        assert "Updating math formulation with operate mode math." not in caplog.text
+        m.build(mode="operate", add_math_dict=custom_math)

         # operate mode set it to false, then our math set it back to active
-        assert m.math.variables.flow_cap.active
+        assert m.applied_math.data.constraints.force_zero_area_use.active
         # operate mode set it to false and our math did not override that
-        assert not m.math.variables.storage_cap.active
-
-    def test_run_mode_mismatch(self):
-        m = build_model(
-            {"config.init.add_math": ["operate"]},
-            "simple_supply,two_hours,investment_costs",
-        )
-        backend = PyomoBackendModel(m.inputs)
-        with pytest.warns(exceptions.ModelWarning) as excinfo:
-            backend._add_run_mode_math()
-
-        assert check_error_or_warning(
-            excinfo, "Running in plan mode, but run mode(s) {'operate'}"
-        )
+        assert not m.applied_math.data.variables.storage_cap.active

     def test_new_build_get_variable(self, simple_supply):
         """Check a decision variable has the correct data type and has all expected attributes."""
@@ -2145,31 +2120,31 @@ def test_fails_on_not_reaching_bounds(


 class TestShadowPrices:
-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply(self):
         m = build_model({}, "simple_supply,two_hours,investment_costs")
         m.build()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def supply_milp(self):
         m = build_model({}, "supply_milp,two_hours,investment_costs")
         m.build()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply_with_yaml_shadow_prices(self):
         m = build_model({}, "simple_supply,two_hours,investment_costs,shadow_prices")
         m.build()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply_yaml(self):
         m = build_model({}, "simple_supply,two_hours,investment_costs,shadow_prices")
         m.build()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def simple_supply_yaml_invalid(self):
         m = build_model(
             {},
@@ -2178,7 +2153,7 @@ def simple_supply_yaml_invalid(self):
         m.build()
         return m

-    @pytest.fixture()
+    @pytest.fixture
     def supply_milp_yaml(self):
"supply_milp,two_hours,investment_costs,shadow_prices") m.build() @@ -2262,3 +2237,84 @@ def test_yaml_with_invalid_constraint(self, simple_supply_yaml_invalid): ) # Since we listed only one (invalid) constraint, tracking should not be active assert not m.backend.shadow_prices.is_active + + +class TestValidateMathDict: + LOGGER = "calliope.backend.backend_model" + + @pytest.fixture + def validate_math(self): + def _validate_math(math_dict: dict): + m = build_model({}, "simple_supply,investment_costs") + math = calliope.preprocess.CalliopeMath(["plan", math_dict]) + backend = calliope.backend.PyomoBackendModel(m._model_data, math) + backend._add_all_inputs_as_parameters() + backend._validate_math_string_parsing() + + return _validate_math + + def test_base_math(self, caplog, validate_math): + with caplog.at_level(logging.INFO, logger=self.LOGGER): + validate_math({}) + assert "Optimisation Model | Validated math strings." in [ + rec.message for rec in caplog.records + ] + + @pytest.mark.parametrize( + ("equation", "where"), + [ + ("1 == 1", "True"), + ( + "sum(flow_out * flow_out_eff, over=[nodes, carriers, techs, timesteps]) <= .inf", + "base_tech=supply and flow_out_eff>0", + ), + ], + ) + def test_add_math(self, caplog, validate_math, equation, where): + with caplog.at_level(logging.INFO, logger=self.LOGGER): + validate_math( + { + "constraints": { + "foo": {"equations": [{"expression": equation}], "where": where} + } + } + ) + assert "Optimisation Model | Validated math strings." in [ + rec.message for rec in caplog.records + ] + + @pytest.mark.parametrize( + "component_dict", + [ + {"equations": [{"expression": "1 = 1"}]}, + {"equations": [{"expression": "1 = 1"}], "where": "foo[bar]"}, + ], + ) + @pytest.mark.parametrize("both_fail", [True, False]) + def test_add_math_fails(self, validate_math, component_dict, both_fail): + math_dict = {"constraints": {"foo": component_dict}} + errors_to_check = [ + "math string parsing (marker indicates where parsing stopped, but may not point to the root cause of the issue)", + " * constraints:foo:", + "equations[0].expression", + "where", + ] + if both_fail: + math_dict["constraints"]["bar"] = component_dict + errors_to_check.append("* constraints:bar:") + else: + math_dict["constraints"]["bar"] = {"equations": [{"expression": "1 == 1"}]} + + with pytest.raises(calliope.exceptions.ModelError) as excinfo: + validate_math(math_dict) + assert check_error_or_warning(excinfo, errors_to_check) + + @pytest.mark.parametrize("eq_string", ["1 = 1", "1 ==\n1[a]"]) + def test_add_math_fails_marker_correct_position(self, validate_math, eq_string): + math_dict = {"constraints": {"foo": {"equations": [{"expression": eq_string}]}}} + + with pytest.raises(calliope.exceptions.ModelError) as excinfo: + validate_math(math_dict) + errorstrings = str(excinfo.value).split("\n") + # marker should be at the "=" sign, i.e., 2 characters from the end + assert len(errorstrings[-2]) - 2 == len(errorstrings[-1]) diff --git a/tests/test_backend_pyomo_objective.py b/tests/test_backend_pyomo_objective.py index 2427daa1b..2c9ffe581 100644 --- a/tests/test_backend_pyomo_objective.py +++ b/tests/test_backend_pyomo_objective.py @@ -1,7 +1,8 @@ -import calliope import pyomo.core as po import pytest +import calliope + from .common.util import build_test_model as build_model approx = pytest.approx diff --git a/tests/test_backend_where_parser.py b/tests/test_backend_where_parser.py index a64caead8..696201553 100644 --- a/tests/test_backend_where_parser.py +++ 
diff --git a/tests/test_backend_pyomo_objective.py b/tests/test_backend_pyomo_objective.py
index 2427daa1b..2c9ffe581 100644
--- a/tests/test_backend_pyomo_objective.py
+++ b/tests/test_backend_pyomo_objective.py
@@ -1,7 +1,8 @@
-import calliope
 import pyomo.core as po
 import pytest

+import calliope
+
 from .common.util import build_test_model as build_model

 approx = pytest.approx
diff --git a/tests/test_backend_where_parser.py b/tests/test_backend_where_parser.py
index a64caead8..696201553 100644
--- a/tests/test_backend_where_parser.py
+++ b/tests/test_backend_where_parser.py
@@ -2,6 +2,7 @@
 import pyparsing
 import pytest
 import xarray as xr
+
 from calliope.attrdict import AttrDict
 from calliope.backend import expression_parser, helper_functions, where_parser
 from calliope.exceptions import BackendError
@@ -17,50 +18,50 @@ def parse_yaml(yaml_string):
     return AttrDict.from_yaml_string(yaml_string)


-@pytest.fixture()
+@pytest.fixture
 def base_parser_elements():
     number, identifier = expression_parser.setup_base_parser_elements()
     return number, identifier


-@pytest.fixture()
+@pytest.fixture
 def number(base_parser_elements):
     return base_parser_elements[0]


-@pytest.fixture()
+@pytest.fixture
 def identifier(base_parser_elements):
     return base_parser_elements[1]


-@pytest.fixture()
+@pytest.fixture
 def data_var(identifier):
     return where_parser.data_var_parser(identifier)


-@pytest.fixture()
+@pytest.fixture
 def config_option(identifier):
     return where_parser.config_option_parser(identifier)


-@pytest.fixture()
+@pytest.fixture
 def bool_operand():
     return where_parser.bool_parser()


-@pytest.fixture()
+@pytest.fixture
 def evaluatable_string(identifier):
     return where_parser.evaluatable_string_parser(identifier)


-@pytest.fixture()
+@pytest.fixture
 def helper_function(number, identifier, evaluatable_string):
     return expression_parser.helper_function_parser(
         evaluatable_string, number, generic_identifier=identifier
     )


-@pytest.fixture()
+@pytest.fixture
 def comparison(
     evaluatable_string, number, helper_function, bool_operand, config_option, data_var
 ):
@@ -74,19 +75,19 @@ def comparison(
     )


-@pytest.fixture()
+@pytest.fixture
 def subset(identifier, evaluatable_string, number):
     return where_parser.subset_parser(identifier, evaluatable_string, number)


-@pytest.fixture()
+@pytest.fixture
 def where(bool_operand, helper_function, data_var, comparison, subset):
     return where_parser.where_parser(
         bool_operand, helper_function, data_var, comparison, subset
     )


-@pytest.fixture()
+@pytest.fixture
 def eval_kwargs(dummy_pyomo_backend_model):
     return {
         "input_data": dummy_pyomo_backend_model.inputs,
@@ -98,7 +99,7 @@ def eval_kwargs(dummy_pyomo_backend_model):
     }


-@pytest.fixture()
+@pytest.fixture
 def parse_where_string(eval_kwargs, where):
     def _parse_where_string(where_string):
         parsed_ = where.parse_string(where_string, parse_all=True)
@@ -563,7 +564,7 @@ def test_where_malformed(self, where, instring):


 class TestAsMathString:
-    @pytest.fixture()
+    @pytest.fixture
     def latex_eval_kwargs(self, eval_kwargs, dummy_latex_backend_model):
         eval_kwargs["return_type"] = "math_string"
         eval_kwargs["backend_interface"] = dummy_latex_backend_model
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 67cc97ee9..7a3e33744 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -2,12 +2,13 @@
 import tempfile
 from pathlib import Path

-import calliope
 import importlib_resources
 import pytest  # noqa: F401
-from calliope import AttrDict, cli
 from click.testing import CliRunner

+import calliope
+from calliope import AttrDict, cli
+
 _MODEL_NATIONAL = (
     importlib_resources.files("calliope")
     / "example_models"
diff --git a/tests/test_constraint_results.py b/tests/test_constraint_results.py
index 4b7afd197..06f3e633c 100644
--- a/tests/test_constraint_results.py
+++ b/tests/test_constraint_results.py
@@ -1,6 +1,7 @@
-import calliope
 import pytest

+import calliope
+
 from .common.util import build_test_model as build_model

 approx = pytest.approx
@@ -103,7 +104,7 @@ def _get_flow(model, flow):

 @pytest.mark.skip(reason="to be reimplemented by comparison to LP files")
 class TestModelSettings:
-    @pytest.fixture()
+    @pytest.fixture
     def run_model(self):
         def _run_model(feasibility, cap_val):
             override_dict = {
@@ -131,15 +132,15 @@ def _run_model(feasibility, cap_val):

         return _run_model

-    @pytest.fixture()
+    @pytest.fixture
     def model_no_unmet(self, run_model):
         return run_model(True, 10)

-    @pytest.fixture()
+    @pytest.fixture
     def model_unmet_demand(self, run_model):
         return run_model(True, 5)

-    @pytest.fixture()
+    @pytest.fixture
     def model_unused_supply(self, run_model):
         return run_model(True, 15)

@@ -191,7 +192,7 @@ def test_expected_infeasible_result(self, override, run_model):

 @pytest.mark.skip(reason="to be reimplemented by comparison to LP files")
 class TestEnergyCapacityPerStorageCapacity:
-    @pytest.fixture()
+    @pytest.fixture
     def model_file(self):
         return "flow_cap_per_storage_cap.yaml"
diff --git a/tests/test_core_attrdict.py b/tests/test_core_attrdict.py
index c47231974..c65ab18e3 100644
--- a/tests/test_core_attrdict.py
+++ b/tests/test_core_attrdict.py
@@ -5,13 +5,14 @@
 import numpy as np
 import pytest
 import ruamel.yaml as ruamel_yaml
+
 from calliope.attrdict import _MISSING, AttrDict

 from .common.util import check_error_or_warning


 class TestAttrDict:
-    @pytest.fixture()
+    @pytest.fixture
     def regular_dict(self):
         d = {
             "a": 1,
@@ -37,16 +38,16 @@ def regular_dict(self):
           d:
     """

-    @pytest.fixture()
+    @pytest.fixture
     def yaml_filepath(self):
         this_path = Path(__file__).parent
         return this_path / "common" / "yaml_file.yaml"

-    @pytest.fixture()
+    @pytest.fixture
     def yaml_string(self):
         return self.setup_string

-    @pytest.fixture()
+    @pytest.fixture
     def attr_dict(self, regular_dict):
         d = regular_dict
         return AttrDict(d)
diff --git a/tests/test_core_model.py b/tests/test_core_model.py
index 47d8d4ba5..e16ebfa4b 100644
--- a/tests/test_core_model.py
+++ b/tests/test_core_model.py
@@ -1,11 +1,13 @@
 import logging
 from contextlib import contextmanager

-import calliope
-import numpy as np
 import pandas as pd
 import pytest

+import calliope
+import calliope.backend
+import calliope.preprocess
+
 from .common.util import build_test_model as build_model
 from .common.util import check_error_or_warning

@@ -65,222 +67,6 @@ def test_add_observed_dict_not_dict(self, national_scale_example):
         )


-class TestAddMath:
-    @pytest.fixture(scope="class")
-    def storage_inter_cluster(self):
-        return build_model(
-            {"config.init.add_math": ["storage_inter_cluster"]},
-            "simple_supply,two_hours,investment_costs",
-        )
-
-    @pytest.fixture(scope="class")
-    def storage_inter_cluster_plus_user_def(self, temp_path, dummy_int: int):
-        new_constraint = calliope.AttrDict(
-            {"variables": {"storage": {"bounds": {"min": dummy_int}}}}
-        )
-        file_path = temp_path.join("custom-math.yaml")
-        new_constraint.to_yaml(file_path)
-        return build_model(
-            {"config.init.add_math": ["storage_inter_cluster", str(file_path)]},
-            "simple_supply,two_hours,investment_costs",
-        )
-
-    @pytest.fixture(scope="class")
-    def temp_path(self, tmpdir_factory):
-        return tmpdir_factory.mktemp("custom_math")
-
-    def test_internal_override(self, storage_inter_cluster):
-        assert "storage_intra_max" in storage_inter_cluster.math["constraints"].keys()
-
-    def test_variable_bound(self, storage_inter_cluster):
-        assert (
-            storage_inter_cluster.math["variables"]["storage"]["bounds"]["min"]
-            == -np.inf
-        )
-
-    @pytest.mark.parametrize(
-        ("override", "expected"),
-        [
-            (["foo"], ["foo"]),
-            (["bar", "foo"], ["bar", "foo"]),
-            (["foo", "storage_inter_cluster"], ["foo"]),
-            (["foo.yaml"], ["foo.yaml"]),
-        ],
-    )
-    def test_allowed_internal_constraint(self, override, expected):
-        with pytest.raises(calliope.exceptions.ModelError) as excinfo:
-            build_model(
-                {"config.init.add_math": override},
-                "simple_supply,two_hours,investment_costs",
-            )
-        assert check_error_or_warning(
-            excinfo,
-            f"Attempted to load additional math that does not exist: {expected}",
-        )
-
-    def test_internal_override_from_yaml(self, temp_path):
-        new_constraint = calliope.AttrDict(
-            {
-                "constraints": {
-                    "constraint_name": {
-                        "foreach": [],
-                        "where": "",
-                        "equations": [{"expression": ""}],
-                    }
-                }
-            }
-        )
-        new_constraint.to_yaml(temp_path.join("custom-math.yaml"))
-        m = build_model(
-            {"config.init.add_math": [str(temp_path.join("custom-math.yaml"))]},
-            "simple_supply,two_hours,investment_costs",
-        )
-        assert "constraint_name" in m.math["constraints"].keys()
-
-    def test_override_existing_internal_constraint(self, temp_path, simple_supply):
-        file_path = temp_path.join("custom-math.yaml")
-        new_constraint = calliope.AttrDict(
-            {
-                "constraints": {
-                    "flow_capacity_per_storage_capacity_min": {"foreach": ["nodes"]}
-                }
-            }
-        )
-        new_constraint.to_yaml(file_path)
-        m = build_model(
-            {"config.init.add_math": [str(file_path)]},
-            "simple_supply,two_hours,investment_costs",
-        )
-        base = simple_supply.math["constraints"][
-            "flow_capacity_per_storage_capacity_min"
-        ]
-        new = m.math["constraints"]["flow_capacity_per_storage_capacity_min"]
-
-        for i in base.keys():
-            if i == "foreach":
-                assert new[i] == ["nodes"]
-            else:
-                assert base[i] == new[i]
-
-    def test_override_order(self, temp_path, simple_supply):
-        to_add = []
-        for path_suffix, foreach in [(1, "nodes"), (2, "techs")]:
-            constr = calliope.AttrDict(
-                {
-                    "constraints.flow_capacity_per_storage_capacity_min.foreach": [
-                        foreach
-                    ]
-                }
-            )
-            filepath = temp_path.join(f"custom-math-{path_suffix}.yaml")
-            constr.to_yaml(filepath)
-            to_add.append(str(filepath))
-
-        m = build_model(
-            {"config.init.add_math": to_add}, "simple_supply,two_hours,investment_costs"
-        )
-
-        base = simple_supply.math["constraints"][
-            "flow_capacity_per_storage_capacity_min"
-        ]
-        new = m.math["constraints"]["flow_capacity_per_storage_capacity_min"]
-
-        for i in base.keys():
-            if i == "foreach":
-                assert new[i] == ["techs"]
-            else:
-                assert base[i] == new[i]
-
-    def test_override_existing_internal_constraint_merge(
-        self, simple_supply, storage_inter_cluster, storage_inter_cluster_plus_user_def
-    ):
-        storage_inter_cluster_math = storage_inter_cluster.math["variables"]["storage"]
-        base_math = simple_supply.math["variables"]["storage"]
-        new_math = storage_inter_cluster_plus_user_def.math["variables"]["storage"]
-        expected = {
-            "title": storage_inter_cluster_math["title"],
-            "description": storage_inter_cluster_math["description"],
-            "default": base_math["default"],
-            "unit": base_math["unit"],
-            "foreach": base_math["foreach"],
-            "where": base_math["where"],
-            "bounds": {
-                "min": new_math["bounds"]["min"],
-                "max": base_math["bounds"]["max"],
-            },
-        }
-
-        assert new_math == expected
-
-
-class TestValidateMathDict:
-    def test_base_math(self, caplog, simple_supply):
-        with caplog.at_level(logging.INFO, logger=LOGGER):
-            simple_supply.validate_math_strings(simple_supply.math)
-        assert "Model: validated math strings" in [
-            rec.message for rec in caplog.records
-        ]
-
-    @pytest.mark.parametrize(
-        ("equation", "where"),
-        [
-            ("1 == 1", "True"),
-            (
-                "flow_out * flow_out_eff + sum(cost, over=costs) <= .inf",
-                "base_tech=supply and flow_out_eff>0",
-            ),
-        ],
-    )
-    def test_add_math(self, caplog, simple_supply, equation, where):
simple_supply, equation, where): - with caplog.at_level(logging.INFO, logger=LOGGER): - simple_supply.validate_math_strings( - { - "constraints": { - "foo": {"equations": [{"expression": equation}], "where": where} - } - } - ) - assert "Model: validated math strings" in [ - rec.message for rec in caplog.records - ] - - @pytest.mark.parametrize( - "component_dict", - [ - {"equations": [{"expression": "1 = 1"}]}, - {"equations": [{"expression": "1 = 1"}], "where": "foo[bar]"}, - ], - ) - @pytest.mark.parametrize("both_fail", [True, False]) - def test_add_math_fails(self, simple_supply, component_dict, both_fail): - math_dict = {"constraints": {"foo": component_dict}} - errors_to_check = [ - "math string parsing (marker indicates where parsing stopped, which might not be the root cause of the issue; sorry...)", - " * constraints:foo:", - "equations[0].expression", - "where", - ] - if both_fail: - math_dict["constraints"]["bar"] = component_dict - errors_to_check.append("* constraints:bar:") - else: - math_dict["constraints"]["bar"] = {"equations": [{"expression": "1 == 1"}]} - - with pytest.raises(calliope.exceptions.ModelError) as excinfo: - simple_supply.validate_math_strings(math_dict) - assert check_error_or_warning(excinfo, errors_to_check) - - @pytest.mark.parametrize("eq_string", ["1 = 1", "1 ==\n1[a]"]) - def test_add_math_fails_marker_correct_position(self, simple_supply, eq_string): - math_dict = {"constraints": {"foo": {"equations": [{"expression": eq_string}]}}} - - with pytest.raises(calliope.exceptions.ModelError) as excinfo: - simple_supply.validate_math_strings(math_dict) - errorstrings = str(excinfo.value).split("\n") - # marker should be at the "=" sign, i.e., 2 characters from the end - assert len(errorstrings[-2]) - 2 == len(errorstrings[-1]) - - class TestOperateMode: @contextmanager def caplog_session(self, request): @@ -399,6 +185,34 @@ def test_build_operate_not_allowed_build(self): m.build(mode="operate") +class TestBuild: + @pytest.fixture(scope="class") + def init_model(self): + return build_model({}, "simple_supply,two_hours,investment_costs") + + def test_ignore_mode_math(self, init_model): + init_model.build(ignore_mode_math=True, force=True) + assert all( + var.obj_type == "parameters" + for var in init_model.backend._dataset.data_vars.values() + ) + + def test_add_math_dict_with_mode_math(self, init_model): + init_model.build( + add_math_dict={"constraints": {"system_balance": {"active": False}}}, + force=True, + ) + assert len(init_model.backend.constraints) > 0 + assert "system_balance" not in init_model.backend.constraints + + def test_add_math_dict_ignore_mode_math(self, init_model): + new_var = { + "variables": {"foo": {"active": True, "bounds": {"min": -1, "max": 1}}} + } + init_model.build(add_math_dict=new_var, ignore_mode_math=True, force=True) + assert set(init_model.backend.variables) == {"foo"} + + class TestSolve: def test_solve_before_build(self): m = build_model({}, "simple_supply,two_hours,investment_costs") diff --git a/tests/test_core_preprocess.py b/tests/test_core_preprocess.py index 51a5108ed..b0f286f46 100644 --- a/tests/test_core_preprocess.py +++ b/tests/test_core_preprocess.py @@ -1,9 +1,10 @@ import warnings -import calliope -import calliope.exceptions as exceptions import pandas as pd import pytest + +import calliope +import calliope.exceptions as exceptions from calliope.attrdict import AttrDict from .common.util import build_test_model as build_model @@ -25,8 +26,8 @@ def test_model_from_dict(self, data_source_dir): } ) 
model_dict.union(node_dict) - for src in model_dict["data_sources"].values(): - src["source"] = (model_dir / src["source"]).as_posix() + for src in model_dict["data_tables"].values(): + src["data"] = (model_dir / src["data"]).as_posix() # test as AttrDict calliope.Model(model_dict) @@ -36,18 +37,18 @@ def test_model_from_dict(self, data_source_dir): @pytest.mark.filterwarnings( "ignore:(?s).*(links, test_link_a_b_elec) | Deactivated:calliope.exceptions.ModelWarning" ) - def test_valid_scenarios(self): + def test_valid_scenarios(self, dummy_int): """Test that valid scenario definition from overrides raises no error and results in applied scenario.""" override = AttrDict.from_yaml_string( - """ + f""" scenarios: scenario_1: ['one', 'two'] overrides: one: - techs.test_supply_gas.flow_cap_max: 20 + techs.test_supply_gas.flow_cap_max: {dummy_int} two: - techs.test_supply_elec.flow_cap_max: 20 + techs.test_supply_elec.flow_cap_max: {dummy_int/2} nodes: a: @@ -59,24 +60,29 @@ def test_valid_scenarios(self): ) model = build_model(override_dict=override, scenario="scenario_1") - assert model._model_def_dict.techs.test_supply_gas.flow_cap_max == 20 - assert model._model_def_dict.techs.test_supply_elec.flow_cap_max == 20 + assert ( + model._model_data.sel(techs="test_supply_gas")["flow_cap_max"] == dummy_int + ) + assert ( + model._model_data.sel(techs="test_supply_elec")["flow_cap_max"] + == dummy_int / 2 + ) - def test_valid_scenario_of_scenarios(self): + def test_valid_scenario_of_scenarios(self, dummy_int): """Test that valid scenario definition which groups scenarios and overrides raises no error and results in applied scenario. """ override = AttrDict.from_yaml_string( - """ + f""" scenarios: scenario_1: ['one', 'two'] scenario_2: ['scenario_1', 'new_location'] overrides: one: - techs.test_supply_gas.flow_cap_max: 20 + techs.test_supply_gas.flow_cap_max: {dummy_int} two: - techs.test_supply_elec.flow_cap_max: 20 + techs.test_supply_elec.flow_cap_max: {dummy_int/2} new_location: nodes.b.techs: test_supply_elec: @@ -91,8 +97,13 @@ def test_valid_scenario_of_scenarios(self): ) model = build_model(override_dict=override, scenario="scenario_2") - assert model._model_def_dict.techs.test_supply_gas.flow_cap_max == 20 - assert model._model_def_dict.techs.test_supply_elec.flow_cap_max == 20 + assert ( + model._model_data.sel(techs="test_supply_gas")["flow_cap_max"] == dummy_int + ) + assert ( + model._model_data.sel(techs="test_supply_elec")["flow_cap_max"] + == dummy_int / 2 + ) def test_invalid_scenarios_dict(self): """Test that invalid scenario definition raises appropriate error""" @@ -201,7 +212,7 @@ def test_inconsistent_time_indices_fails(self): """ # should fail: wrong length of demand_heat csv vs demand_elec override = AttrDict.from_yaml_string( - "data_sources.demand_elec.source: data_sources/demand_heat_wrong_length.csv" + "data_tables.demand_elec.data: data_tables/demand_heat_wrong_length.csv" ) # check in output error that it points to: 07/01/2005 10:00:00 with pytest.warns(exceptions.ModelWarning) as excinfo: @@ -212,7 +223,7 @@ def test_inconsistent_time_indices_fails(self): def test_inconsistent_time_indices_passes_thanks_to_time_subsetting(self): override = AttrDict.from_yaml_string( - "data_sources.demand_elec.source: data_sources/demand_heat_wrong_length.csv" + "data_tables.demand_elec.data: data_tables/demand_heat_wrong_length.csv" ) # should pass: wrong length of demand_heat csv, but time subsetting removes the difference with warnings.catch_warnings(): @@ -344,7 +355,7 @@ def 
test_clustering_and_cyclic_storage(self): """ override = { "config.init.time_subset": ["2005-01-01", "2005-01-04"], - "config.init.time_cluster": "data_sources/cluster_days.csv", + "config.init.time_cluster": "data_tables/cluster_days.csv", "config.build.cyclic_storage": True, } diff --git a/tests/test_core_util.py b/tests/test_core_util.py index e71c6e2cd..8e9175bae 100644 --- a/tests/test_core_util.py +++ b/tests/test_core_util.py @@ -3,12 +3,13 @@ import logging from pathlib import Path -import calliope import importlib_resources import jsonschema import numpy as np import pandas as pd import pytest + +import calliope from calliope.util import schema from calliope.util.generate_runs import generate_runs from calliope.util.logging import log_time @@ -180,10 +181,10 @@ def test_invalid_dict(self, to_validate, expected_path): ], ) - @pytest.fixture() + @pytest.fixture def base_math(self): return calliope.AttrDict.from_yaml( - Path(calliope.__file__).parent / "math" / "base.yaml" + Path(calliope.__file__).parent / "math" / "plan.yaml" ) @pytest.mark.parametrize( @@ -194,7 +195,8 @@ def test_validate_math(self, base_math, dict_path): Path(calliope.__file__).parent / "config" / "math_schema.yaml" ) to_validate = base_math.union( - calliope.AttrDict.from_yaml(dict_path), allow_override=True + calliope.AttrDict.from_yaml(dict_path, allow_override=True), + allow_override=True, ) schema.validate_dict(to_validate, math_schema, "") @@ -320,7 +322,7 @@ def sample_model_def_schema(self): """ return calliope.AttrDict.from_yaml_string(schema_string) - @pytest.fixture() + @pytest.fixture def expected_config_defaults(self): return pd.Series( { @@ -330,7 +332,7 @@ def expected_config_defaults(self): } ).sort_index() - @pytest.fixture() + @pytest.fixture def expected_model_def_defaults(self): return pd.Series( { diff --git a/tests/test_example_models.py b/tests/test_example_models.py index 9708e12e3..507e3d509 100755 --- a/tests/test_example_models.py +++ b/tests/test_example_models.py @@ -1,10 +1,11 @@ import shutil from pathlib import Path -import calliope import numpy as np import pandas as pd import pytest + +import calliope from calliope import exceptions from .common.util import check_error_or_warning @@ -16,7 +17,7 @@ class TestModelPreproccessing: def test_preprocess_national_scale(self): calliope.examples.national_scale() - @pytest.mark.time_intensive() + @pytest.mark.time_intensive def test_preprocess_time_clustering(self): calliope.examples.time_clustering() @@ -36,11 +37,11 @@ def test_preprocess_operate(self): class TestNationalScaleExampleModelSenseChecks: @pytest.fixture(scope="class") - def nat_model_from_data_sources(self): + def nat_model_from_data_tables(self): df = pd.read_csv( calliope.examples._EXAMPLE_MODEL_DIR / "national_scale" - / "data_sources" + / "data_tables" / "time_varying_params.csv", index_col=0, header=[0, 1, 2, 3], @@ -48,9 +49,9 @@ def nat_model_from_data_sources(self): model = calliope.Model( Path(__file__).parent / "common" - / "national_scale_from_data_sources" + / "national_scale_from_data_tables" / "model.yaml", - data_source_dfs={"time_varying_df": df}, + data_table_dfs={"time_varying_df": df}, time_subset=["2005-01-01", "2005-01-01"], ) model.build() @@ -64,7 +65,7 @@ def nat_model(self): model.build() return model - @pytest.fixture(params=["nat_model", "nat_model_from_data_sources"]) + @pytest.fixture(params=["nat_model", "nat_model_from_data_tables"]) def example_tester(self, request): def _example_tester(solver="cbc", solver_io=None): model = 
request.getfixturevalue(request.param) @@ -112,7 +113,7 @@ def _example_tester(solver="cbc", solver_io=None): def test_nationalscale_example_results_cbc(self, example_tester): example_tester() - @pytest.mark.needs_gurobi_license() + @pytest.mark.needs_gurobi_license def test_nationalscale_example_results_gurobi(self, example_tester): pytest.importorskip("gurobipy") example_tester(solver="gurobi", solver_io="python") @@ -130,7 +131,7 @@ def test_nationalscale_example_results_glpk(self, example_tester): pytest.skip("GLPK not installed") def test_fails_gracefully_without_timeseries(self): - override = {"data_sources": {"_REPLACE_": {}}} + override = {"data_tables": {"_REPLACE_": {}}} with pytest.raises(calliope.exceptions.ModelError) as excinfo: calliope.examples.national_scale(override_dict=override) @@ -214,7 +215,7 @@ def example_tester(self, solver="cbc", solver_io=None): def test_nationalscale_example_results_cbc(self): self.example_tester() - @pytest.mark.needs_gurobi_license() + @pytest.mark.needs_gurobi_license @pytest.mark.filterwarnings( "ignore:(?s).*`gurobi_persistent`.*:calliope.exceptions.ModelWarning" ) @@ -230,7 +231,7 @@ def test_nationalscale_example_results_gurobi(self): assert np.allclose(gurobi_data.flow_cap, gurobi_persistent_data.flow_cap) assert np.allclose(gurobi_data.cost, gurobi_persistent_data.cost) - @pytest.fixture() + @pytest.fixture def base_model_data(self): model = calliope.examples.national_scale( time_subset=["2005-01-01", "2005-01-03"], scenario="spores" @@ -269,7 +270,7 @@ def test_fail_with_spores_as_input_dim(self, base_model_data): excinfo, "Cannot run SPORES with a SPORES dimension in any input" ) - @pytest.fixture() + @pytest.fixture def spores_with_override(self): def _spores_with_override(override_dict): result_without_override = self.example_tester() @@ -396,10 +397,10 @@ def test_nationalscale_resampled_example_results_glpk(self): class TestUrbanScaleExampleModelSenseChecks: def example_tester(self, source_unit, solver="cbc", solver_io=None): - data_sources = f"data_sources.pv_resource.select.scaler: {source_unit}" + data_tables = f"data_tables.pv_resource.select.scaler: {source_unit}" unit_override = { "techs.pv.source_unit": source_unit, - **calliope.AttrDict.from_yaml_string(data_sources), + **calliope.AttrDict.from_yaml_string(data_tables), } model = calliope.examples.urban_scale( @@ -441,7 +442,7 @@ def example_tester(self, source_unit, solver="cbc", solver_io=None): def test_urban_example_results_area(self): self.example_tester("per_area") - @pytest.mark.needs_gurobi_license() + @pytest.mark.needs_gurobi_license def test_urban_example_results_area_gurobi(self): pytest.importorskip("gurobipy") self.example_tester("per_area", solver="gurobi", solver_io="python") @@ -449,7 +450,7 @@ def test_urban_example_results_area_gurobi(self): def test_urban_example_results_cap(self): self.example_tester("per_cap") - @pytest.mark.needs_gurobi_license() + @pytest.mark.needs_gurobi_license def test_urban_example_results_cap_gurobi(self): pytest.importorskip("gurobipy") self.example_tester("per_cap", solver="gurobi", solver_io="python") diff --git a/tests/test_io.py b/tests/test_io.py index 05b30a618..b496db6bf 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1,10 +1,11 @@ import os import tempfile -import calliope -import calliope.io import pytest # noqa: F401 import xarray as xr + +import calliope +import calliope.io from calliope import exceptions from .common.util import check_error_or_warning @@ -104,7 +105,7 @@ def 
test_serialised_list_popped(self, request, serialised_list, model_name): ("serialised_nones", ["foo_none", "scenario"]), ( "serialised_dicts", - ["foo_dict", "foo_attrdict", "defaults", "config", "math"], + ["foo_dict", "foo_attrdict", "defaults", "config", "applied_math"], ), ("serialised_sets", ["foo_set", "foo_set_1_item"]), ("serialised_single_element_list", ["foo_list_1_item", "foo_set_1_item"]), @@ -181,7 +182,7 @@ def test_save_csv_not_optimal(self): with pytest.warns(exceptions.ModelWarning): model.to_csv(out_path, dropna=False) - @pytest.mark.parametrize("attr", ["config", "math"]) + @pytest.mark.parametrize("attr", ["config"]) def test_dicts_as_model_attrs_and_property(self, model_from_file, attr): assert attr in model_from_file._model_data.attrs.keys() assert hasattr(model_from_file, attr) @@ -199,11 +200,9 @@ def test_save_read_solve_save_netcdf(self, model, tmpdir_factory): model.to_netcdf(out_path) model_from_disk = calliope.read_netcdf(out_path) - # Ensure _model_def_dict doesn't exist to simulate a re-run via the backend - delattr(model_from_disk, "_model_def_dict") + # Simulate a re-run via the backend model_from_disk.build() model_from_disk.solve(force=True) - assert not hasattr(model_from_disk, "_model_def_dict") with tempfile.TemporaryDirectory() as tempdir: out_path = os.path.join(tempdir, "model.nc") diff --git a/tests/test_math.py b/tests/test_math.py index 6b05fb77d..35aed4e71 100644 --- a/tests/test_math.py +++ b/tests/test_math.py @@ -4,12 +4,14 @@ import numpy as np import pytest -from calliope import AttrDict from pyomo.repn.tests import lp_diff +from calliope import AttrDict + from .common.util import build_lp, build_test_model CALLIOPE_DIR: Path = importlib.resources.files("calliope") +PLAN_MATH: AttrDict = AttrDict.from_yaml(CALLIOPE_DIR / "math" / "plan.yaml") @pytest.fixture(scope="class") @@ -45,7 +47,7 @@ class TestBaseMath: @pytest.fixture(scope="class") def base_math(self): - return AttrDict.from_yaml(CALLIOPE_DIR / "math" / "base.yaml") + return AttrDict.from_yaml(CALLIOPE_DIR / "math" / "plan.yaml") def test_flow_cap(self, compare_lps): self.TEST_REGISTER.add("variables.flow_cap") @@ -78,7 +80,7 @@ def test_storage_max(self, compare_lps): self.TEST_REGISTER.add("constraints.storage_max") model = build_test_model(scenario="simple_storage,two_hours,investment_costs") custom_math = { - "constraints": {"storage_max": model.math.constraints.storage_max} + "constraints": {"storage_max": PLAN_MATH.constraints.storage_max} } compare_lps(model, custom_math, "storage_max") @@ -93,7 +95,7 @@ def test_flow_out_max(self, compare_lps): ) custom_math = { - "constraints": {"flow_out_max": model.math.constraints.flow_out_max} + "constraints": {"flow_out_max": PLAN_MATH.constraints.flow_out_max} } compare_lps(model, custom_math, "flow_out_max") @@ -105,7 +107,7 @@ def test_balance_conversion(self, compare_lps): ) custom_math = { "constraints": { - "balance_conversion": model.math.constraints.balance_conversion + "balance_conversion": PLAN_MATH.constraints.balance_conversion } } @@ -117,7 +119,7 @@ def test_source_max(self, compare_lps): {}, "simple_supply_plus,resample_two_days,investment_costs" ) custom_math = { - "constraints": {"my_constraint": model.math.constraints.source_max} + "constraints": {"my_constraint": PLAN_MATH.constraints.source_max} } compare_lps(model, custom_math, "source_max") @@ -128,9 +130,7 @@ def test_balance_transmission(self, compare_lps): {"techs.test_link_a_b_elec.one_way": True}, "simple_conversion,two_hours" ) custom_math = { - 
"constraints": { - "my_constraint": model.math.constraints.balance_transmission - } + "constraints": {"my_constraint": PLAN_MATH.constraints.balance_transmission} } compare_lps(model, custom_math, "balance_transmission") @@ -145,7 +145,7 @@ def test_balance_storage(self, compare_lps): "simple_storage,two_hours", ) custom_math = { - "constraints": {"my_constraint": model.math.constraints.balance_storage} + "constraints": {"my_constraint": PLAN_MATH.constraints.balance_storage} } compare_lps(model, custom_math, "balance_storage") @@ -234,7 +234,7 @@ def abs_filepath(self): def custom_math(self): return AttrDict.from_yaml(self.CUSTOM_MATH_DIR / self.YAML_FILEPATH) - @pytest.fixture() + @pytest.fixture def build_and_compare(self, abs_filepath, compare_lps): def _build_and_compare( filename: str, @@ -260,7 +260,7 @@ def _build_and_compare( overrides = {} model = build_test_model( - {"config.init.add_math": [abs_filepath], **overrides}, scenario + {"config.build.add_math": [abs_filepath], **overrides}, scenario ) compare_lps(model, custom_math, filename) @@ -769,9 +769,9 @@ class TestNetImportShare(CustomMathExamples): YAML_FILEPATH = "net_import_share.yaml" shared_overrides = { "parameters.net_import_share": 1.5, - "data_sources": { + "data_tables": { "demand_heat": { - "source": "data_sources/demand_heat.csv", + "data": "data_tables/demand_heat.csv", "rows": "timesteps", "columns": "nodes", "select": {"nodes": "a"}, diff --git a/tests/test_postprocess_math_documentation.py b/tests/test_postprocess_math_documentation.py new file mode 100644 index 000000000..fb2558de7 --- /dev/null +++ b/tests/test_postprocess_math_documentation.py @@ -0,0 +1,59 @@ +from pathlib import Path + +import pytest + +from calliope.postprocess.math_documentation import MathDocumentation + +from .common.util import build_test_model, check_error_or_warning + + +class TestMathDocumentation: + @pytest.fixture(scope="class") + def no_build(self): + model = build_test_model({}, "simple_supply,two_hours,investment_costs") + model.build() + return model + + @pytest.fixture(scope="class") + def build_all(self): + model = build_test_model({}, "simple_supply,two_hours,investment_costs") + model.build() + return MathDocumentation(model, include="all") + + @pytest.fixture(scope="class") + def build_valid(self): + model = build_test_model({}, "simple_supply,two_hours,investment_costs") + model.build() + return MathDocumentation(model, include="valid") + + @pytest.mark.parametrize( + ("format", "startswith"), + [ + ("tex", "\n\\documentclass{article}"), + ("rst", "\nObjective"), + ("md", "\n## Objective"), + ], + ) + @pytest.mark.parametrize("include", ["build_all", "build_valid"]) + def test_string_return(self, request, format, startswith, include): + math_documentation = request.getfixturevalue(include) + string_math = math_documentation.write(format=format) + assert string_math.startswith(startswith) + + def test_to_file(self, build_all, tmpdir_factory): + filepath = tmpdir_factory.mktemp("custom_math").join("custom-math.tex") + build_all.write(filename=filepath) + assert Path(filepath).exists() + + @pytest.mark.parametrize( + ("filepath", "format"), + [(None, "foo"), ("myfile.foo", None), ("myfile.tex", "foo")], + ) + def test_invalid_format(self, build_all, tmpdir_factory, filepath, format): + if filepath is not None: + filepath = tmpdir_factory.mktemp("custom_math").join(filepath) + with pytest.raises(ValueError) as excinfo: # noqa: PT011 + build_all.write(filename="foo", format=format) + assert check_error_or_warning( + 
excinfo, "Math documentation format must be one of" + ) diff --git a/tests/test_preprocess_data_sources.py b/tests/test_preprocess_data_sources.py index af0817e9a..ae9598da4 100644 --- a/tests/test_preprocess_data_sources.py +++ b/tests/test_preprocess_data_sources.py @@ -1,9 +1,10 @@ import logging -import calliope import pandas as pd import pytest -from calliope.preprocess import data_sources + +import calliope +from calliope.preprocess import data_tables from calliope.util.schema import CONFIG_SCHEMA, extract_from_schema from .common.util import check_error_or_warning @@ -16,55 +17,55 @@ def init_config(): @pytest.fixture(scope="class") def data_dir(tmp_path_factory): - filepath = tmp_path_factory.mktemp("data_sources") + filepath = tmp_path_factory.mktemp("data_tables") return filepath @pytest.fixture(scope="class") -def generate_data_source_dict(data_dir): - def _generate_data_source_dict(filename, df, rows, columns): +def generate_data_table_dict(data_dir): + def _generate_data_table_dict(filename, df, rows, columns): filepath = data_dir / filename df.rename_axis(index=rows).to_csv(filepath) return { - "source": filepath.as_posix(), + "data": filepath.as_posix(), "rows": rows, "columns": columns, "add_dims": {"parameters": "test_param"}, } - return _generate_data_source_dict + return _generate_data_table_dict -class TestDataSourceUtils: +class TestDataTableUtils: @pytest.fixture(scope="class") - def source_obj(self, init_config, generate_data_source_dict): + def table_obj(self, init_config, generate_data_table_dict): df = pd.Series({"bar": 0, "baz": 1}) - source_dict = generate_data_source_dict( + table_dict = generate_data_table_dict( "foo.csv", df, rows="test_row", columns=None ) - ds = data_sources.DataSource(init_config, "ds_name", source_dict) + ds = data_tables.DataTable(init_config, "ds_name", table_dict) ds.input["foo"] = ["foobar"] return ds - def test_name(self, source_obj): - assert source_obj.name == "ds_name" + def test_name(self, table_obj): + assert table_obj.name == "ds_name" - def test_raise_error(self, data_dir, source_obj): + def test_raise_error(self, data_dir, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj._raise_error("bar") - assert check_error_or_warning(excinfo, "(data_sources, ds_name) | bar.") + table_obj._raise_error("bar") + assert check_error_or_warning(excinfo, "(data_tables, ds_name) | bar.") - def test_log_message(self, caplog, data_dir, source_obj): + def test_log_message(self, caplog, data_dir, table_obj): caplog.set_level(logging.INFO) - source_obj._log("bar", "info") - assert "(data_sources, ds_name) | bar." in caplog.text + table_obj._log("bar", "info") + assert "(data_tables, ds_name) | bar." 
in caplog.text @pytest.mark.parametrize( ("key", "expected"), [("rows", ["test_row"]), ("columns", None), ("foo", ["foobar"])], ) - def test_listify_if_defined(self, source_obj, key, expected): - output = source_obj._listify_if_defined(key) + def test_listify_if_defined(self, table_obj, key, expected): + output = table_obj._listify_if_defined(key) if expected is None: assert output is expected else: @@ -80,8 +81,8 @@ def test_listify_if_defined(self, source_obj, key, expected): ([None, 1], ["foo", "bar"]), ], ) - def test_compare_axis_names_passes(self, source_obj, loaded, defined): - source_obj._compare_axis_names(loaded, defined, "foobar") + def test_compare_axis_names_passes(self, table_obj, loaded, defined): + table_obj._compare_axis_names(loaded, defined, "foobar") @pytest.mark.parametrize( ("loaded", "defined"), @@ -91,46 +92,46 @@ def test_compare_axis_names_passes(self, source_obj, loaded, defined): (["bar", 1], ["foo", "bar"]), ], ) - def test_compare_axis_names_fails(self, source_obj, loaded, defined): + def test_compare_axis_names_fails(self, table_obj, loaded, defined): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj._compare_axis_names(loaded, defined, "foobar") + table_obj._compare_axis_names(loaded, defined, "foobar") assert check_error_or_warning(excinfo, "Trying to set names for foobar") -class TestDataSourceInitOneLevel: +class TestDataTableInitOneLevel: @pytest.fixture(scope="class") - def multi_row_no_col_data(self, generate_data_source_dict): + def multi_row_no_col_data(self, generate_data_table_dict): df = pd.Series({"bar": 0, "baz": 1}) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_no_col_file.csv", df, rows="test_row", columns=None ) @pytest.fixture(scope="class") - def multi_row_one_col_data(self, generate_data_source_dict): + def multi_row_one_col_data(self, generate_data_table_dict): df = pd.DataFrame({"foo": {"bar": 0, "baz": 1}}) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_one_col_file.csv", df, rows="test_row", columns="test_col" ) @pytest.fixture(scope="class") - def one_row_multi_col_data(self, generate_data_source_dict): + def one_row_multi_col_data(self, generate_data_table_dict): df = pd.DataFrame({"foo": {"bar": 0}, "foobar": {"bar": 1}}) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "one_row_multi_col_file.csv", df, rows="test_row", columns="test_col" ) @pytest.fixture(scope="class") - def multi_row_multi_col_data(self, generate_data_source_dict): + def multi_row_multi_col_data(self, generate_data_table_dict): df = pd.DataFrame( {"foo": {"bar": 0, "baz": 10}, "foobar": {"bar": 0, "baz": 20}} ) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_multi_col_file.csv", df, rows="test_row", columns="test_col" ) def test_multi_row_no_col(self, init_config, multi_row_no_col_data): - expected_df, source_dict = multi_row_no_col_data - ds = data_sources.DataSource(init_config, "ds_name", source_dict) + expected_df, table_dict = multi_row_no_col_data + ds = data_tables.DataTable(init_config, "ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -138,16 +139,16 @@ def test_multi_row_no_col(self, init_config, multi_row_no_col_data): ) @pytest.mark.parametrize( - "data_source_ref", + "data_table_ref", [ "multi_row_one_col_data", "one_row_multi_col_data", 
"multi_row_multi_col_data", ], ) - def test_multi_row_one_col(self, init_config, request, data_source_ref): - expected_df, source_dict = request.getfixturevalue(data_source_ref) - ds = data_sources.DataSource(init_config, "ds_name", source_dict) + def test_multi_row_one_col(self, init_config, request, data_table_ref): + expected_df, table_dict = request.getfixturevalue(data_table_ref) + ds = data_tables.DataTable(init_config, "ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row", "test_col"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -155,21 +156,21 @@ def test_multi_row_one_col(self, init_config, request, data_source_ref): ) @pytest.mark.parametrize( - "data_source_ref", + "data_table_ref", [ "multi_row_one_col_data", "one_row_multi_col_data", "multi_row_multi_col_data", ], ) - def test_load_from_df(self, init_config, request, data_source_ref): - expected_df, source_dict = request.getfixturevalue(data_source_ref) - source_dict["source"] = data_source_ref - ds = data_sources.DataSource( + def test_load_from_df(self, init_config, request, data_table_ref): + expected_df, table_dict = request.getfixturevalue(data_table_ref) + table_dict["data"] = data_table_ref + ds = data_tables.DataTable( init_config, "ds_name", - source_dict, - data_source_dfs={data_source_ref: expected_df}, + table_dict, + data_table_dfs={data_table_ref: expected_df}, ) test_param = ds.dataset["test_param"] assert not set(["test_row", "test_col"]).symmetric_difference(test_param.dims) @@ -178,25 +179,20 @@ def test_load_from_df(self, init_config, request, data_source_ref): ) def test_load_from_df_must_be_df(self, init_config, multi_row_no_col_data): - expected_df, source_dict = multi_row_no_col_data - source_dict["source"] = "foo" + expected_df, table_dict = multi_row_no_col_data + table_dict["data"] = "foo" with pytest.raises(calliope.exceptions.ModelError) as excinfo: - data_sources.DataSource( - init_config, - "ds_name", - source_dict, - data_source_dfs={"foo": expected_df}, + data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"foo": expected_df} ) - assert check_error_or_warning( - excinfo, "Data source must be a pandas DataFrame." 
- ) + assert check_error_or_warning(excinfo, "Data table must be a pandas DataFrame.") -class TestDataSourceInitMultiLevel: +class TestDataTableInitMultiLevel: @pytest.fixture(scope="class") - def multi_row_no_col_data(self, generate_data_source_dict): + def multi_row_no_col_data(self, generate_data_table_dict): df = pd.Series({("bar1", "bar2"): 0, ("baz1", "baz2"): 1}) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_no_col_file.csv", df, rows=["test_row1", "test_row2"], @@ -204,9 +200,9 @@ def multi_row_no_col_data(self, generate_data_source_dict): ) @pytest.fixture(scope="class") - def multi_row_one_col_data(self, generate_data_source_dict): + def multi_row_one_col_data(self, generate_data_table_dict): df = pd.DataFrame({"foo": {("bar1", "bar2"): 0, ("baz1", "baz2"): 1}}) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_one_col_file.csv", df, rows=["test_row1", "test_row2"], @@ -214,11 +210,11 @@ def multi_row_one_col_data(self, generate_data_source_dict): ) @pytest.fixture(scope="class") - def one_row_multi_col_data(self, generate_data_source_dict): + def one_row_multi_col_data(self, generate_data_table_dict): df = pd.DataFrame( {("foo1", "foo2"): {"bar": 0}, ("foobar1", "foobar2"): {"bar": 1}} ) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "one_row_multi_col_file.csv", df, rows=["test_row"], @@ -226,14 +222,14 @@ def one_row_multi_col_data(self, generate_data_source_dict): ) @pytest.fixture(scope="class") - def multi_row_multi_col_data(self, generate_data_source_dict): + def multi_row_multi_col_data(self, generate_data_table_dict): df = pd.DataFrame( { ("foo1", "foo2"): {("bar1", "bar2"): 0, ("baz1", "baz2"): 10}, ("foobar1", "foobar2"): {("bar1", "bar2"): 0, ("baz1", "baz2"): 20}, } ) - return df, generate_data_source_dict( + return df, generate_data_table_dict( "multi_row_multi_col_file.csv", df, rows=["test_row1", "test_row2"], @@ -241,8 +237,8 @@ def multi_row_multi_col_data(self, generate_data_source_dict): ) def test_multi_row_no_col(self, init_config, multi_row_no_col_data): - expected_df, source_dict = multi_row_no_col_data - ds = data_sources.DataSource(init_config, "ds_name", source_dict) + expected_df, table_dict = multi_row_no_col_data + ds = data_tables.DataTable(init_config, "ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row1", "test_row2"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -253,31 +249,31 @@ def test_multi_row_no_col(self, init_config, multi_row_no_col_data): ) @pytest.mark.parametrize( - "data_source_ref", + "data_table_ref", [ "multi_row_one_col_data", "one_row_multi_col_data", "multi_row_multi_col_data", ], ) - def test_multi_row_one_col(self, init_config, request, data_source_ref): - expected_df, source_dict = request.getfixturevalue(data_source_ref) - ds = data_sources.DataSource(init_config, "ds_name", source_dict) + def test_multi_row_one_col(self, init_config, request, data_table_ref): + expected_df, table_dict = request.getfixturevalue(data_table_ref) + ds = data_tables.DataTable(init_config, "ds_name", table_dict) test_param = ds.dataset["test_param"] - all_dims = source_dict["rows"] + source_dict["columns"] + all_dims = table_dict["rows"] + table_dict["columns"] assert not set(all_dims).symmetric_difference(test_param.dims) pd.testing.assert_frame_equal( - test_param.to_series().dropna().unstack(source_dict["columns"]), + 
test_param.to_series().dropna().unstack(table_dict["columns"]), expected_df, check_names=False, check_dtype=False, ) -class TestDataSourceSelectDropAdd: +class TestDataTableSelectDropAdd: @pytest.fixture(scope="class") - def source_obj(self, init_config): - def _source_obj(**source_dict_kwargs): + def table_obj(self, init_config): + def _table_obj(**table_dict_kwargs): df = pd.DataFrame( { "test_param": { @@ -288,80 +284,80 @@ def _source_obj(**source_dict_kwargs): } } ) - source_dict = { - "source": "df", + table_dict = { + "data": "df", "rows": ["test_row1", "test_row2"], "columns": "parameters", - **source_dict_kwargs, + **table_dict_kwargs, } - ds = data_sources.DataSource( - init_config, "ds_name", source_dict, data_source_dfs={"df": df} + ds = data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"df": df} ) return ds - return _source_obj + return _table_obj - def test_select_keep_one(self, source_obj): - data_source = source_obj(select={"test_row1": "bar1"}) + def test_select_keep_one(self, table_obj): + data_table = table_obj(select={"test_row1": "bar1"}) expected = pd.Series({("bar1", "baz1"): 0, ("bar1", "baz4"): 3}) - assert data_source.dataset.coords["test_row1"].item() == "bar1" + assert data_table.dataset.coords["test_row1"].item() == "bar1" pd.testing.assert_series_equal( - data_source.dataset.test_param.to_series().dropna(), + data_table.dataset.test_param.to_series().dropna(), expected.sort_index(), check_dtype=False, check_names=False, ) - def test_select_keep_two(self, source_obj): - data_source = source_obj(select={"test_row1": ["bar1", "bar2"]}) + def test_select_keep_two(self, table_obj): + data_table = table_obj(select={"test_row1": ["bar1", "bar2"]}) expected = pd.Series( {("bar1", "baz1"): 0, ("bar2", "baz2"): 1, ("bar1", "baz4"): 3} ) assert not set(["bar1", "bar2"]).symmetric_difference( - data_source.dataset.coords["test_row1"].values + data_table.dataset.coords["test_row1"].values ) pd.testing.assert_series_equal( - data_source.dataset.test_param.to_series().dropna(), + data_table.dataset.test_param.to_series().dropna(), expected.sort_index(), check_dtype=False, check_names=False, ) - def test_select_drop_one(self, source_obj): - data_source = source_obj( + def test_select_drop_one(self, table_obj): + data_table = table_obj( select={"test_row1": "bar2", "test_row2": "baz2"}, drop=["test_row1", "test_row2"], ) - assert not data_source.dataset.dims - assert data_source.dataset.test_param.item() == 1 + assert not data_table.dataset.dims + assert data_table.dataset.test_param.item() == 1 - def test_select_drop_two(self, source_obj): - data_source = source_obj(select={"test_row1": "bar1"}, drop="test_row1") + def test_select_drop_two(self, table_obj): + data_table = table_obj(select={"test_row1": "bar1"}, drop="test_row1") expected = pd.Series({"baz1": 0, "baz4": 3}) - assert "test_row1" not in data_source.dataset.dims + assert "test_row1" not in data_table.dataset.dims pd.testing.assert_series_equal( - data_source.dataset.test_param.to_series().dropna(), + data_table.dataset.test_param.to_series().dropna(), expected.sort_index(), check_dtype=False, check_names=False, ) - def test_drop_one(self, source_obj): - data_source = source_obj(drop="test_row1") + def test_drop_one(self, table_obj): + data_table = table_obj(drop="test_row1") expected = pd.Series({"baz1": 0, "baz2": 1, "baz3": 2, "baz4": 3}) - assert "test_row1" not in data_source.dataset.dims + assert "test_row1" not in data_table.dataset.dims pd.testing.assert_series_equal( - 
data_source.dataset.test_param.to_series().dropna(), + data_table.dataset.test_param.to_series().dropna(), expected.sort_index(), check_dtype=False, check_names=False, ) -class TestDataSourceMalformed: +class TestDataTableMalformed: @pytest.fixture(scope="class") - def source_obj(self, init_config): - def _source_obj(**source_dict_kwargs): + def table_obj(self, init_config): + def _table_obj(**table_dict_kwargs): df = pd.DataFrame( { "foo": { @@ -372,71 +368,71 @@ def _source_obj(**source_dict_kwargs): } } ) - source_dict = { - "source": "df", + table_dict = { + "data": "df", "rows": ["test_row1", "test_row2"], - **source_dict_kwargs, + **table_dict_kwargs, } - ds = data_sources.DataSource( - init_config, "ds_name", source_dict, data_source_dfs={"df": df} + ds = data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"df": df} ) return ds - return _source_obj + return _table_obj - def test_check_processed_tdf_no_parameters_dim(self, source_obj): + def test_check_processed_tdf_no_parameters_dim(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj() + table_obj() assert check_error_or_warning(excinfo, "The `parameters` dimension must exist") - def test_check_processed_tdf_duplicated_idx(self, source_obj): + def test_check_processed_tdf_duplicated_idx(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(drop="test_row2", add_dims={"parameters": "test_param"}) + table_obj(drop="test_row2", add_dims={"parameters": "test_param"}) assert check_error_or_warning(excinfo, "Duplicate index items found:") - def test_check_processed_tdf_duplicated_dim_name(self, source_obj): + def test_check_processed_tdf_duplicated_dim_name(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(add_dims={"test_row2": "foo", "parameters": "test_param"}) + table_obj(add_dims={"test_row2": "foo", "parameters": "test_param"}) assert check_error_or_warning(excinfo, "Duplicate dimension names found:") - def test_too_many_called_cols(self, source_obj): + def test_too_many_called_cols(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(columns=["foo", "bar"]) + table_obj(columns=["foo", "bar"]) assert check_error_or_warning( excinfo, "Expected 2 columns levels in loaded data." ) - def test_too_few_called_rows(self, source_obj): + def test_too_few_called_rows(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(rows=None) + table_obj(rows=None) assert check_error_or_warning( excinfo, "Expected a single index level in loaded data." 
) - def test_check_for_protected_params(self, source_obj): + def test_check_for_protected_params(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(add_dims={"parameters": "definition_matrix"}) + table_obj(add_dims={"parameters": "definition_matrix"}) assert check_error_or_warning( excinfo, "`definition_matrix` is a protected array" ) -class TestDataSourceLookupDictFromParam: +class TestDataTableLookupDictFromParam: @pytest.fixture(scope="class") - def source_obj(self, init_config): + def table_obj(self, init_config): df = pd.DataFrame( { "FOO": {("foo1", "bar1"): 1, ("foo1", "bar2"): 1}, "BAR": {("foo1", "bar1"): 1, ("foo2", "bar2"): 1}, } ) - source_dict = { - "source": "df", + table_dict = { + "data": "df", "rows": ["techs", "carriers"], "columns": "parameters", } - ds = data_sources.DataSource( - init_config, "ds_name", source_dict, data_source_dfs={"df": df} + ds = data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"df": df} ) return ds @@ -447,49 +443,49 @@ def source_obj(self, init_config): ("BAR", {"foo1": {"BAR": "bar1"}, "foo2": {"BAR": "bar2"}}), ], ) - def test_carrier_info_dict_from_model_data_var(self, source_obj, param, expected): - carrier_info = source_obj.lookup_dict_from_param(param, "carriers") + def test_carrier_info_dict_from_model_data_var(self, table_obj, param, expected): + carrier_info = table_obj.lookup_dict_from_param(param, "carriers") assert carrier_info == expected - def test_carrier_info_dict_from_model_data_var_missing_dim(self, source_obj): + def test_carrier_info_dict_from_model_data_var_missing_dim(self, table_obj): with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj.lookup_dict_from_param("FOO", "foobar") + table_obj.lookup_dict_from_param("FOO", "foobar") check_error_or_warning( excinfo, "Loading FOO with missing dimension(s). 
Must contain `techs` and `foobar`, received: ('techs', 'carriers')", ) -class TestDataSourceTechDict: +class TestDataTableTechDict: @pytest.fixture(scope="class") - def source_obj(self, init_config): - def _source_obj(df_dict, rows="techs"): + def table_obj(self, init_config): + def _table_obj(df_dict, rows="techs"): df = pd.DataFrame(df_dict) - source_dict = {"source": "df", "rows": rows, "columns": "parameters"} - ds = data_sources.DataSource( - init_config, "ds_name", source_dict, data_source_dfs={"df": df} + table_dict = {"data": "df", "rows": rows, "columns": "parameters"} + ds = data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"df": df} ) return ds - return _source_obj + return _table_obj - def test_tech_dict_from_one_param(self, source_obj): + def test_tech_dict_from_one_param(self, table_obj): df_dict = {"test_param": {"foo1": 1, "foo2": 2}} - tech_dict, base_dict = source_obj(df_dict).tech_dict() + tech_dict, base_dict = table_obj(df_dict).tech_dict() assert tech_dict == {"foo1": {}, "foo2": {}} assert base_dict == {} - def test_tech_dict_from_two_param(self, source_obj): + def test_tech_dict_from_two_param(self, table_obj): df_dict = {"foo": {"foo1": 1, "foo2": 2}, "bar": {"bar1": 1, "bar2": 2}} - tech_dict, base_dict = source_obj(df_dict).tech_dict() + tech_dict, base_dict = table_obj(df_dict).tech_dict() assert tech_dict == {"foo1": {}, "foo2": {}, "bar1": {}, "bar2": {}} assert base_dict == {} - def test_tech_dict_from_parent(self, source_obj): + def test_tech_dict_from_parent(self, table_obj): df_dict = {"base_tech": {"foo1": "transmission", "foo2": "supply"}} - tech_dict, base_dict = source_obj(df_dict).tech_dict() + tech_dict, base_dict = table_obj(df_dict).tech_dict() assert tech_dict == {"foo1": {}, "foo2": {}} assert base_dict == { @@ -497,19 +493,19 @@ def test_tech_dict_from_parent(self, source_obj): "foo2": {"base_tech": "supply"}, } - def test_tech_dict_from_parent_and_param(self, source_obj): + def test_tech_dict_from_parent_and_param(self, table_obj): df_dict = {"base_tech": {"foo1": "transmission"}, "other_param": {"bar1": 1}} - tech_dict, base_dict = source_obj(df_dict).tech_dict() + tech_dict, base_dict = table_obj(df_dict).tech_dict() assert tech_dict == {"foo1": {}, "bar1": {}} assert base_dict == {"foo1": {"base_tech": "transmission"}} - def test_tech_dict_from_to_from(self, source_obj): + def test_tech_dict_from_to_from(self, table_obj): df_dict = { "from": {"foo1": "bar1", "foo2": "bar2"}, "to": {"foo1": "bar2", "foo3": "bar1"}, } - tech_dict, base_dict = source_obj(df_dict).tech_dict() + tech_dict, base_dict = table_obj(df_dict).tech_dict() assert tech_dict == {"foo1": {}, "foo2": {}, "foo3": {}} assert base_dict == { @@ -518,56 +514,56 @@ def test_tech_dict_from_to_from(self, source_obj): "foo3": {"to": "bar1"}, } - def test_tech_dict_empty(self, source_obj): + def test_tech_dict_empty(self, table_obj): df_dict = {"available_area": {"foo1": 1}} - tech_dict, base_dict = source_obj(df_dict, rows="nodes").tech_dict() + tech_dict, base_dict = table_obj(df_dict, rows="nodes").tech_dict() assert not tech_dict assert not base_dict -class TestDataSourceNodeDict: +class TestDataTableNodeDict: @pytest.fixture(scope="class") - def source_obj(self, init_config): - def _source_obj(df_dict, rows=["nodes", "techs"]): + def table_obj(self, init_config): + def _table_obj(df_dict, rows=["nodes", "techs"]): df = pd.DataFrame(df_dict) - source_dict = {"source": "df", "rows": rows, "columns": "parameters"} - ds = data_sources.DataSource( - 
init_config, "ds_name", source_dict, data_source_dfs={"df": df} + table_dict = {"data": "df", "rows": rows, "columns": "parameters"} + ds = data_tables.DataTable( + init_config, "ds_name", table_dict, data_table_dfs={"df": df} ) return ds - return _source_obj + return _table_obj - def test_node_dict_from_one_param(self, source_obj): + def test_node_dict_from_one_param(self, table_obj): df_dict = {"available_area": {("foo1", "bar1"): 1, ("foo2", "bar2"): 2}} tech_dict = calliope.AttrDict({"bar1": {}, "bar2": {}}) - node_dict = source_obj(df_dict).node_dict(tech_dict) + node_dict = table_obj(df_dict).node_dict(tech_dict) assert node_dict == { "foo1": {"techs": {"bar1": None}}, "foo2": {"techs": {"bar2": None}}, } - def test_node_dict_from_two_param(self, source_obj): + def test_node_dict_from_two_param(self, table_obj): df_dict = { "available_area": {("foo1", "bar1"): 1, ("foo1", "bar2"): 2}, "other_param": {("foo2", "bar2"): 1}, } tech_dict = calliope.AttrDict({"bar1": {}, "bar2": {}}) - node_dict = source_obj(df_dict).node_dict(tech_dict) + node_dict = table_obj(df_dict).node_dict(tech_dict) assert node_dict == { "foo1": {"techs": {"bar1": None, "bar2": None}}, "foo2": {"techs": {"bar2": None}}, } - def test_node_dict_extra_dim_in_param(self, source_obj): + def test_node_dict_extra_dim_in_param(self, table_obj): df_dict = { "available_area": {("foo1", "bar1", "baz1"): 1, ("foo2", "bar2", "baz2"): 2} } tech_dict = calliope.AttrDict({"bar1": {}, "bar2": {}}) - node_dict = source_obj(df_dict, rows=["nodes", "techs", "carriers"]).node_dict( + node_dict = table_obj(df_dict, rows=["nodes", "techs", "carriers"]).node_dict( tech_dict ) @@ -576,12 +572,12 @@ def test_node_dict_extra_dim_in_param(self, source_obj): "foo2": {"techs": {"bar2": None}}, } - def test_node_dict_node_not_in_ds(self, source_obj): + def test_node_dict_node_not_in_ds(self, table_obj): node_tech_df_dict = {"my_param": {("foo1", "bar1"): 1, ("foo1", "bar2"): 2}} node_df_dict = {"available_area": {"foo2": 1}} tech_dict = calliope.AttrDict({"bar1": {}, "bar2": {}}) - node_tech_ds = source_obj(node_tech_df_dict) - node_ds = source_obj(node_df_dict, rows="nodes") + node_tech_ds = table_obj(node_tech_df_dict) + node_ds = table_obj(node_df_dict, rows="nodes") node_tech_ds.dataset = node_tech_ds.dataset.merge(node_ds.dataset) node_dict = node_tech_ds.node_dict(tech_dict) @@ -590,23 +586,23 @@ def test_node_dict_node_not_in_ds(self, source_obj): "foo2": {"techs": {}}, } - def test_node_dict_no_info(self, source_obj): + def test_node_dict_no_info(self, table_obj): df_dict = {"param": {"foo1": 1, "foo2": 2}} tech_dict = calliope.AttrDict( {"bar1": {"base_tech": "transmission"}, "bar2": {}} ) - node_dict = source_obj(df_dict, rows="techs").node_dict(tech_dict) + node_dict = table_obj(df_dict, rows="techs").node_dict(tech_dict) assert node_dict == {} - def test_transmission_tech_with_nodes(self, source_obj): + def test_transmission_tech_with_nodes(self, table_obj): df_dict = {"param": {("foo1", "bar1"): 1, ("foo2", "bar2"): 2}} tech_dict = calliope.AttrDict( {"bar1": {"base_tech": "transmission"}, "bar2": {}} ) with pytest.raises(calliope.exceptions.ModelError) as excinfo: - source_obj(df_dict).node_dict(tech_dict) + table_obj(df_dict).node_dict(tech_dict) check_error_or_warning( excinfo, diff --git a/tests/test_preprocess_model_data.py b/tests/test_preprocess_model_data.py index db37787ce..f15393e24 100644 --- a/tests/test_preprocess_model_data.py +++ b/tests/test_preprocess_model_data.py @@ -5,43 +5,43 @@ import pandas as pd import 
pytest import xarray as xr + from calliope import exceptions from calliope.attrdict import AttrDict -from calliope.preprocess import data_sources, load +from calliope.preprocess import data_tables, scenarios from calliope.preprocess.model_data import ModelDataFactory from .common.util import build_test_model as build_model from .common.util import check_error_or_warning -@pytest.fixture() +@pytest.fixture def model_def(): - filepath = Path(__file__).parent / "common" / "test_model" / "model.yaml" - model_def_dict, model_def_path, _ = load.load_model_definition( - filepath.as_posix(), scenario="simple_supply,empty_tech_node" + model_def_path = Path(__file__).parent / "common" / "test_model" / "model.yaml" + model_dict = AttrDict.from_yaml(model_def_path) + model_def_override, _ = scenarios.load_scenario_overrides( + model_dict, scenario="simple_supply,empty_tech_node" ) - return model_def_dict, model_def_path + return model_def_override, model_def_path -@pytest.fixture() +@pytest.fixture def data_source_list(model_def, init_config): model_def_dict, model_def_path = model_def return [ - data_sources.DataSource( - init_config, source_name, source_dict, {}, model_def_path - ) - for source_name, source_dict in model_def_dict.pop("data_sources", {}).items() + data_tables.DataTable(init_config, source_name, source_dict, {}, model_def_path) + for source_name, source_dict in model_def_dict.pop("data_tables", {}).items() ] -@pytest.fixture() +@pytest.fixture def init_config(config_defaults, model_def): model_def_dict, _ = model_def config_defaults.union(model_def_dict.pop("config"), allow_override=True) return config_defaults["init"] -@pytest.fixture() +@pytest.fixture def model_data_factory(model_def, init_config, model_defaults): model_def_dict, _ = model_def return ModelDataFactory( @@ -49,13 +49,13 @@ def model_data_factory(model_def, init_config, model_defaults): ) -@pytest.fixture() +@pytest.fixture def model_data_factory_w_params(model_data_factory: ModelDataFactory): model_data_factory.add_node_tech_data() return model_data_factory -@pytest.fixture() +@pytest.fixture def my_caplog(caplog): caplog.set_level(logging.DEBUG, logger="calliope.preprocess") return caplog @@ -854,7 +854,7 @@ def test_raise_error_on_transmission_tech_in_node( class TestTopLevelParams: - @pytest.fixture() + @pytest.fixture def run_and_test(self, model_data_factory_w_params): def _run_and_test(in_dict, out_dict, dims): model_data_factory_w_params.model_definition["parameters"] = { @@ -880,7 +880,7 @@ def test_parameter_already_exists(self): build_model({"parameters.flow_out_eff": 1}, "simple_supply,two_hours") assert check_error_or_warning( excinfo, - "A parameter with this name has already been defined in a data source or at a node/tech level.", + "A parameter with this name has already been defined in a data table or at a node/tech level.", ) @pytest.mark.parametrize("val", [1, 1.0, np.inf, "foo"]) diff --git a/tests/test_preprocess_model_math.py b/tests/test_preprocess_model_math.py new file mode 100644 index 000000000..46af363e8 --- /dev/null +++ b/tests/test_preprocess_model_math.py @@ -0,0 +1,184 @@ +"""Test the model math handler.""" + +import logging +from copy import deepcopy +from pathlib import Path +from random import shuffle + +import pytest + +import calliope +from calliope.exceptions import ModelError +from calliope.preprocess import CalliopeMath + + +@pytest.fixture +def model_math_default(): + return CalliopeMath([]) + + +@pytest.fixture(scope="module") +def def_path(tmp_path_factory): + return 
tmp_path_factory.mktemp("test_model_math")
+
+
+@pytest.fixture(scope="module")
+def user_math(dummy_int):
+    new_vars = {"variables": {"storage": {"bounds": {"min": dummy_int}}}}
+    new_constr = {
+        "constraints": {
+            "foobar": {"foreach": [], "where": "", "equations": [{"expression": ""}]}
+        }
+    }
+    return calliope.AttrDict(new_vars | new_constr)
+
+
+@pytest.fixture(scope="module")
+def user_math_path(def_path, user_math):
+    file_path = def_path / "custom-math.yaml"
+    user_math.to_yaml(file_path)
+    return "custom-math.yaml"
+
+
+@pytest.mark.parametrize("invalid_obj", [1, "foo", {"foo": "bar"}, True, CalliopeMath])
+def test_invalid_eq(model_math_default, invalid_obj):
+    """Comparisons should not work with invalid objects."""
+    assert not model_math_default == invalid_obj
+
+
+@pytest.mark.parametrize("modes", [[], ["storage_inter_cluster"]])
+class TestInit:
+    def test_init_order(self, caplog, modes, model_math_default):
+        """Math should be added in order, keeping defaults."""
+        with caplog.at_level(logging.INFO):
+            model_math = CalliopeMath(modes)
+        assert all(
+            f"Math preprocessing | added file '{i}'." in caplog.messages for i in modes
+        )
+        assert model_math_default.history + modes == model_math.history
+
+    def test_init_order_user_math(
+        self, modes, user_math_path, def_path, model_math_default
+    ):
+        """User math order should be respected."""
+        modes = modes + [user_math_path]
+        shuffle(modes)
+        model_math = CalliopeMath(modes, def_path)
+        assert model_math_default.history + modes == model_math.history
+
+    def test_init_user_math_invalid_relative(self, modes, user_math_path):
+        """Init with user math should fail if model definition path is not given for a relative path."""
+        with pytest.raises(ModelError):
+            CalliopeMath(modes + [user_math_path])
+
+    def test_init_user_math_valid_absolute(self, modes, def_path, user_math_path):
+        """Init with user math should succeed if user math is an absolute path."""
+        abs_path = str((def_path / user_math_path).absolute())
+        model_math = CalliopeMath(modes + [abs_path])
+        assert model_math.in_history(abs_path)
+
+    def test_init_dict(self, modes, user_math_path, def_path):
+        """Math dictionary reload should lead to no alterations."""
+        modes = modes + [user_math_path]
+        shuffle(modes)
+        model_math = CalliopeMath(modes, def_path)
+        saved = dict(model_math)
+        reloaded = CalliopeMath.from_dict(saved)
+        assert model_math == reloaded
+
+
+class TestMathLoading:
+    @pytest.fixture(scope="class")
+    def pre_defined_mode(self):
+        return "storage_inter_cluster"
+
+    @pytest.fixture
+    def model_math_w_mode(self, model_math_default, pre_defined_mode):
+        model_math_default._add_pre_defined_file(pre_defined_mode)
+        return model_math_default
+
+    @pytest.fixture
+    def model_math_w_mode_user(self, model_math_w_mode, user_math_path, def_path):
+        model_math_w_mode._add_user_defined_file(user_math_path, def_path)
+        return model_math_w_mode
+
+    @pytest.fixture(scope="class")
+    def predefined_mode_data(self, pre_defined_mode):
+        path = Path(calliope.__file__).parent / "math" / f"{pre_defined_mode}.yaml"
+        math = calliope.AttrDict.from_yaml(path)
+        return math
+
+    def test_predefined_add(self, model_math_w_mode, predefined_mode_data):
+        """Added mode should be in data."""
+        flat = predefined_mode_data.as_dict_flat()
+        assert all(model_math_w_mode.data.get_key(i) == flat[i] for i in flat.keys())
+
+    def test_predefined_add_history(self, pre_defined_mode, model_math_w_mode):
+        """Added modes should be recorded."""
+        assert model_math_w_mode.in_history(pre_defined_mode)
+
+    def test_predefined_add_duplicate(self, pre_defined_mode, model_math_w_mode):
+        """Adding the same mode twice is invalid."""
+        with pytest.raises(ModelError):
+            model_math_w_mode._add_pre_defined_file(pre_defined_mode)
+
+    @pytest.mark.parametrize("invalid_mode", ["foobar", "foobar.yaml", "operate.yaml"])
+    def test_predefined_add_fail(self, invalid_mode, model_math_w_mode):
+        """Requesting nonexistent modes or modes with suffixes should fail."""
+        with pytest.raises(ModelError):
+            model_math_w_mode._add_pre_defined_file(invalid_mode)
+
+    def test_user_math_add(
+        self, model_math_w_mode_user, predefined_mode_data, user_math
+    ):
+        """Added user math should be in data."""
+        expected_math = deepcopy(predefined_mode_data)
+        expected_math.union(user_math, allow_override=True)
+        flat = expected_math.as_dict_flat()
+        assert all(
+            model_math_w_mode_user.data.get_key(i) == flat[i] for i in flat.keys()
+        )
+
+    def test_user_math_add_history(self, model_math_w_mode_user, user_math_path):
+        """Added user math should be recorded."""
+        assert model_math_w_mode_user.in_history(user_math_path)
+
+    def test_repr(self, model_math_w_mode):
+        expected_repr_content = """Calliope math definition dictionary with:
+    4 decision variable(s)
+    0 global expression(s)
+    9 constraint(s)
+    0 piecewise constraint(s)
+    0 objective(s)
+    """
+        assert expected_repr_content == str(model_math_w_mode)
+
+    def test_add_dict(self, model_math_w_mode, model_math_w_mode_user, user_math):
+        model_math_w_mode.add(user_math)
+        assert model_math_w_mode_user == model_math_w_mode
+
+    def test_user_math_add_duplicate(
+        self, model_math_w_mode_user, user_math_path, def_path
+    ):
+        """Adding the same user math file twice should fail."""
+        with pytest.raises(ModelError):
+            model_math_w_mode_user._add_user_defined_file(user_math_path, def_path)
+
+    @pytest.mark.parametrize("invalid_mode", ["foobar", "foobar.yaml", "operate.yaml"])
+    def test_user_math_add_fail(self, invalid_mode, model_math_w_mode_user, def_path):
+        """Requesting nonexistent user modes should fail."""
+        with pytest.raises(ModelError):
+            model_math_w_mode_user._add_user_defined_file(invalid_mode, def_path)
+
+
+class TestValidate:
+    def test_validate_math_fail(self):
+        """Invalid math keys must trigger a failure."""
+        model_math = CalliopeMath([{"foo": "bar"}])
+        with pytest.raises(ModelError):
+            model_math.validate()
+
+    def test_math_default(self, caplog, model_math_default):
+        with caplog.at_level(logging.INFO):
+            model_math_default.validate()
+        assert "Math preprocessing | validated math against schema." 
diff --git a/tests/test_preprocess_time.py b/tests/test_preprocess_time.py
index 04cbbc83b..3272872d3 100644
--- a/tests/test_preprocess_time.py
+++ b/tests/test_preprocess_time.py
@@ -1,5 +1,6 @@
 import pandas as pd
 import pytest  # noqa: F401
+
 from calliope import AttrDict, exceptions
 
 from .common.util import build_test_model
@@ -16,9 +17,9 @@ def test_change_date_format(self):
         override = AttrDict.from_yaml_string(
             """
             config.init.time_format: "%d/%m/%Y %H:%M"
-            data_sources:
-              demand_elec.source: data_sources/demand_heat_diff_dateformat.csv
-              demand_heat.source: data_sources/demand_heat_diff_dateformat.csv
+            data_tables:
+              demand_elec.data: data_tables/demand_heat_diff_dateformat.csv
+              demand_heat.data: data_tables/demand_heat_diff_dateformat.csv
             """
         )
         model = build_test_model(override_dict=override, scenario="simple_conversion")
@@ -30,7 +31,7 @@ def test_incorrect_date_format_one(self):
         # should fail: wrong dateformat input for one file
         override = AttrDict.from_yaml_string(
-            "data_sources.demand_elec.source: data_sources/demand_heat_diff_dateformat.csv"
+            "data_tables.demand_elec.data: data_tables/demand_heat_diff_dateformat.csv"
         )
 
         with pytest.raises(exceptions.ModelError):
@@ -46,7 +47,7 @@ def test_incorrect_date_format_multi(self):
     def test_incorrect_date_format_one_value_only(self):
         # should fail: one value wrong in file
         override = AttrDict.from_yaml_string(
-            "data_sources.test_demand_elec.source: data_sources/demand_heat_wrong_dateformat.csv"
+            "data_tables.test_demand_elec.data: data_tables/demand_heat_wrong_dateformat.csv"
         )
         # check in output error that it points to: 07/01/2005 10:00:00
         with pytest.raises(exceptions.ModelError):
@@ -60,12 +61,12 @@ class TestClustering:
     def clustered_model(self, request):
         cluster_init = {
             "time_subset": ["2005-01-01", "2005-01-04"],
-            "time_cluster": f"data_sources/{request.param}.csv",
+            "time_cluster": f"data_tables/{request.param}.csv",
         }
         if "diff_dateformat" in request.param:
             cluster_init["override_dict"] = {
-                "data_sources": {
-                    "demand_elec.source": "data_sources/demand_heat_diff_dateformat.csv"
+                "data_tables": {
+                    "demand_elec.data": "data_tables/demand_heat_diff_dateformat.csv"
                 }
             }
             cluster_init["time_format"] = "%d/%m/%Y %H:%M"
@@ -110,7 +111,7 @@ def test_cluster_datesteps(self, clustered_model):
     @pytest.mark.parametrize(
         "var",
         [
-            "lookup_cluster_first_timestep",
+            "cluster_first_timestep",
             "lookup_cluster_last_timestep",
             "lookup_datestep_cluster",
             "lookup_datestep_last_cluster_timestep",
@@ -126,7 +127,7 @@ def test_resampling_to_6h_then_clustering(self):
             scenario="simple_supply",
             time_subset=["2005-01-01", "2005-01-04"],
             time_resample="6h",
-            time_cluster="data_sources/cluster_days.csv",
+            time_cluster="data_tables/cluster_days.csv",
         )
 
         dtindex = pd.DatetimeIndex(
@@ -151,7 +152,7 @@ def test_15min_resampling_to_6h(self):
         # The data is identical for '2005-01-01' and '2005-01-03' timesteps,
         # it is only different for '2005-01-02'
         override = AttrDict.from_yaml_string(
-            "data_sources.demand_elec.source: data_sources/demand_elec_15mins.csv"
+            "data_tables.demand_elec.data: data_tables/demand_elec_15mins.csv"
         )
 
         model = build_test_model(override, scenario="simple_supply", time_resample="6h")
@@ -178,7 +179,7 @@ def test_15min_to_2h_resampling_to_2h(self):
         CSV has daily timeseries varying from 15min to 2h resolution, resample all to 2h
         """
         override = AttrDict.from_yaml_string(
-            "data_sources.demand_elec.source: data_sources/demand_elec_15T_to_2h.csv"
+            "data_tables.demand_elec.data: data_tables/demand_elec_15T_to_2h.csv"
"data_tables.demand_elec.data: data_tables/demand_elec_15T_to_2h.csv" ) model = build_test_model( @@ -213,12 +214,12 @@ def test_different_ts_resolutions_resampling_to_6h(self): # it is only different for '2005-01-02' override = AttrDict.from_yaml_string( """ - data_sources: + data_tables: demand_elec: select: nodes: a demand_elec_15m: - source: data_sources/demand_elec_15mins.csv + data: data_tables/demand_elec_15mins.csv rows: timesteps columns: nodes select: