From 1639bad014be6ceaf66b24b3c04a06d957f25c19 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 21 Feb 2023 15:02:39 +0100 Subject: [PATCH 01/33] post-tag bump From 21494bb0fe4c92a1c53ce00520c7ab1b5daf3304 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 13 Jun 2023 13:34:25 +0200 Subject: [PATCH 02/33] Ordering the API functions according to the order in the C++ API of preCICE --- cyprecice/cyprecice.pyx | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 9bb93e69..54951517 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -242,41 +242,41 @@ cdef class Interface: """ return self.thisptr.requiresInitialData () - def requires_reading_checkpoint (self): + def requires_writing_checkpoint (self): """ - Checks if the participant is required to read an iteration checkpoint. + Checks if the participant is required to write an iteration checkpoint. - If true, the participant is required to read an iteration checkpoint before + If true, the participant is required to write an iteration checkpoint before calling advance(). - - preCICE refuses to proceed if reading a checkpoint is required, + + preCICE refuses to proceed if writing a checkpoint is required, but this method isn't called prior to advance(). Notes ----- - This function returns false before the first call to advance(). - Previous calls: initialize() has been called """ - return self.thisptr.requiresReadingCheckpoint () + return self.thisptr.requiresWritingCheckpoint () - def requires_writing_checkpoint (self): + def requires_reading_checkpoint (self): """ - Checks if the participant is required to write an iteration checkpoint. + Checks if the participant is required to read an iteration checkpoint. - If true, the participant is required to write an iteration checkpoint before + If true, the participant is required to read an iteration checkpoint before calling advance(). 
- - preCICE refuses to proceed if writing a checkpoint is required, + + preCICE refuses to proceed if reading a checkpoint is required, but this method isn't called prior to advance(). Notes ----- + This function returns false before the first call to advance(). + Previous calls: initialize() has been called """ - return self.thisptr.requiresWritingCheckpoint () + return self.thisptr.requiresReadingCheckpoint () # mesh access From f8820eb6130d3ffd38713d913872a80ee0f50338 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 13 Jun 2023 14:06:11 +0200 Subject: [PATCH 03/33] Minor documentation edits --- cyprecice/cyprecice.pyx | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 54951517..52289a5f 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -506,7 +506,7 @@ cdef class Interface: def set_mesh_triangle (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id): """ - Sets mesh triangle from edge IDs + Set a mesh triangle from edge IDs Parameters ---------- @@ -569,8 +569,7 @@ cdef class Interface: def set_mesh_quad (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id): """ - Sets mesh Quad from vertex IDs. - WARNING: Quads are not fully implemented yet. + Set a mesh Quad from vertex IDs. 
Parameters ---------- From a670620f46cb07c11cb0e7b7729f1f07116cec76 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 13 Jun 2023 14:32:06 +0200 Subject: [PATCH 04/33] Format the precice-config for the solverdummy and use standard names in the solverdummy --- examples/solverdummy/precice-config.xml | 103 +++++++++++++----------- examples/solverdummy/solverdummy.py | 12 +-- 2 files changed, 61 insertions(+), 54 deletions(-) diff --git a/examples/solverdummy/precice-config.xml b/examples/solverdummy/precice-config.xml index e5de6fc8..7f199aee 100644 --- a/examples/solverdummy/precice-config.xml +++ b/examples/solverdummy/precice-config.xml @@ -1,52 +1,59 @@ - - - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/solverdummy/solverdummy.py b/examples/solverdummy/solverdummy.py index 8d094f91..3af107e9 100644 --- a/examples/solverdummy/solverdummy.py +++ b/examples/solverdummy/solverdummy.py @@ -20,14 +20,14 @@ participant_name = args.participantName if participant_name == 'SolverOne': - write_data_name = 'dataOne' - read_data_name = 'dataTwo' - mesh_name = 'MeshOne' + write_data_name = 'Data-One' + read_data_name = 'Data-Two' + mesh_name = 'SolverOne-Mesh' if participant_name == 'SolverTwo': - read_data_name = 'dataOne' - write_data_name = 'dataTwo' - mesh_name = 'MeshTwo' + read_data_name = 'Data-One' + write_data_name = 'Data-Two' + mesh_name = 'SolverTwo-Mesh' num_vertices = 3 # Number of vertices From c6649b452bb120646a1e3fbab4be1f3fe94f7f58 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Tue, 13 Jun 2023 14:54:15 +0200 Subject: [PATCH 05/33] Minor formatting changes. 
--- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index f07343dc..3ac7a608 100644 --- a/README.md +++ b/README.md @@ -94,11 +94,11 @@ $ python3 setup.py install --user ``` **Options:** - - `--include-dirs=`, default: `''` + - `--include-dirs=`, default: `''` Path to the headers of preCICE, point to the sources `$PRECICE_ROOT/src`, or the your custom install prefix `$prefix/include`. - + **NOTES:** - + - If you have built preCICE using CMake, you can pass the path to the CMake binary directory using `--library-dirs`. - It is recommended to use preCICE as a shared library here. @@ -145,7 +145,7 @@ $ python3 -c "import precice" # Usage -You can find the documentation of the implemented interface in the file `precice.pyx`. For an example of how `pyprecice` can be used please refer to the [1D elastic tube example](https://precice.org/tutorials-elastic-tube-1d.html#python). +You can find the documentation of the implemented interface in the file `precice.pyx`. For an example of how `pyprecice` can be used please refer to the [1D elastic tube example](https://precice.org/tutorials-elastic-tube-1d.html#python). **Note** The python package that is installed is called `pyprecice`. It provides the python module `precice` that can be use in your code via `import precice`, for example. @@ -187,7 +187,7 @@ Collecting pyprecice error: command 'x86_64-linux-gnu-gcc' failed with exit status 1 ---------------------------------------- - Failed building wheel for pyprecice + Failed building wheel for pyprecice ``` Please try to install `python3-dev`. E.g. via `apt install python3-dev`. Please make sure that you use the correct version (e.g. `python3.5-dev` or `python3.6-dev`). You can check your version via `python3 --version`. @@ -223,7 +223,7 @@ module load mpi.intel/2018_gcc module load cmake/3.12.1 ``` At the time of this writing `module load boost/1.68.0` is no longer available. 
Instead -boost 1.65.1 was installed per the `boost and yaml-cpp` guide above. +boost 1.65.1 was installed per the `boost and yaml-cpp` guide above. In order to have the right python dependencies, a packaged conda environment was transferred to SuperMUC. The following dependencies were installed: @@ -250,7 +250,7 @@ Then, navigate to the python_future bindings script. cd /path/to/precice/src/precice/bindings/python_future ``` Append the following to the head of the file to allow Python2 to run Python3 code. Note that -importing `unicode_literals` from `future` will cause errors in `setuptools` methods as string literals +importing `unicode_literals` from `future` will cause errors in `setuptools` methods as string literals in code are interpreted as `unicode` with this import. ``` from __future__ import (absolute_import, division, From 562616ba11d82d014f865d5912453407c7f58e61 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Tue, 13 Jun 2023 16:00:02 +0200 Subject: [PATCH 06/33] Lint markdown files and update files correspondingly. (#180) * Lint markdown files and update correspondingly. * Minor update. 
--- .github/workflows/check-markdown.yml | 14 +++ .markdownlint.json | 7 ++ README.md | 135 ++++++++++++++++----------- docs/MigrationGuide.md | 28 ++++-- docs/ReleaseGuide.md | 33 +++---- examples/solverdummy/README.md | 13 ++- 6 files changed, 143 insertions(+), 87 deletions(-) create mode 100644 .github/workflows/check-markdown.yml create mode 100644 .markdownlint.json diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml new file mode 100644 index 00000000..63a0df9a --- /dev/null +++ b/.github/workflows/check-markdown.yml @@ -0,0 +1,14 @@ +name: Lint docs +on: [push, pull_request] +jobs: + check_md: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v2 + - name: Lint markdown files (markdownlint) + uses: articulate/actions-markdownlint@v1 + with: + config: .markdownlint.json + files: '.' + ignore: changelog-entries diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 00000000..e53595a9 --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,7 @@ +{ + "MD013": false, + "MD014": false, + "MD024": false, + "MD034": false, + "MD033": false +} diff --git a/README.md b/README.md index 3ac7a608..dbfddff0 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,16 @@ -⚠️ The latest version of the documentation for the python bindings can be found on [precice.org](https://precice.org/installation-bindings-python.html). The information from this `README` is currently under revision and will be moved ⚠️ +# Python language bindings for the C++ library preCICE -Python language bindings for the C++ library preCICE ----------------------------------------------------- +⚠️ The latest version of the documentation for the python bindings can be found on [precice.org](https://precice.org/installation-bindings-python.html). 
The information from this `README` is currently under revision and will be moved ⚠️ [![Upload Python Package](https://github.com/precice/python-bindings/workflows/Upload%20Python%20Package/badge.svg?branch=master)](https://pypi.org/project/pyprecice/) This package provides python language bindings for the C++ library [preCICE](https://github.com/precice/precice). Note that the first three digits of the version number of the bindings indicate the preCICE version that the bindings support. The last digit represents the version of the bindings. Example: `v2.0.0.1` and `v2.0.0.2` of the bindings represent versions `1` and `2` of the bindings that are compatible with preCICE `v2.0.0`. -# User documentation +## User documentation Please refer to [the preCICE documentation](https://www.precice.org/installation-bindings-python.html) for information on how to install and use the python bindings. Information below is intended for advanced users and developers. -# Required dependencies +## Required dependencies **preCICE**: Refer to [the preCICE documentation](https://precice.org/installation-overview.html) for information on building and installation. @@ -19,143 +18,154 @@ Please refer to [the preCICE documentation](https://www.precice.org/installation **MPI**: `mpi4py` requires MPI to be installed on your system. -# Installing the package +## Installing the package We recommend using pip3 (version 19.0.0 or newer required) for the sake of simplicity. You can check your pip3 version via `pip3 --version`. To update pip3, use the following line: -``` +```bash $ pip3 install --user --upgrade pip ``` -## Using pip3 +### Using pip3 -### preCICE system installs +#### preCICE system installs For system installs of preCICE, installation works out of the box. There are different ways how pip can be used to install pyprecice. pip will fetch cython and other build-time dependencies, compile the bindings and finally install the package pyprecice. 
* (recommended) install [pyprecice from PyPI](https://pypi.org/project/pyprecice/) - ``` + ```bash $ pip3 install --user pyprecice ``` * provide the link to this repository to pip (replace `` with the branch you want to use, preferably `master` or `develop`) - ``` + ```bash $ pip3 install --user https://github.com/precice/python-bindings/archive/.zip ``` * if you already cloned this repository, execute the following command from this directory: - ``` + ```bash $ pip3 install --user . ``` + *note the dot at the end of the line* -### preCICE at custom location (setting PATHS) +#### preCICE at custom location (setting PATHS) If preCICE (the C++ library) was installed in a custom prefix, or only built but not installed at all, you have to extend the following environment variables: -- `LIBRARY_PATH`, `LD_LIBRARY_PATH` to the library location, or `$prefix/lib` -- `CPATH` either to the `src` directory or the `$prefix/include` +* `LIBRARY_PATH`, `LD_LIBRARY_PATH` to the library location, or `$prefix/lib` +* `CPATH` either to the `src` directory or the `$prefix/include` The preCICE documentation provides more informaiton on [linking preCICE](https://precice.org/installation-linking.html). -## Using Spack +### Using Spack You can also install the python language bindings for preCICE via Spack by installing the Spack package `py-pyprecice`. Refer to [our installation guide for preCICE via Spack](https://precice.org/installation-spack.html) for getting started with Spack. -## Using setup.py +### Using setup.py -### preCICE system installs +#### preCICE system installs In this directory, execute: -``` + +```bash $ python3 setup.py install --user ``` -### preCICE at custom location (setting PATHS) +#### preCICE at custom location (setting PATHS) see above. Then run -``` + +```bash $ python3 setup.py install --user ``` -### preCICE at custom location (explicit include path, library path) +#### preCICE at custom location (explicit include path, library path) 1. 
Install cython and other dependencies via pip3 - ``` + + ```bash $ pip3 install --user setuptools wheel cython packaging numpy ``` + 2. Open terminal in this folder. 3. Build the bindings - ``` + + ```bash $ python3 setup.py build_ext --include-dirs=$PRECICE_ROOT/src --library-dirs=$PRECICE_ROOT/build/last ``` - **Options:** - - `--include-dirs=`, default: `''` - Path to the headers of preCICE, point to the sources `$PRECICE_ROOT/src`, or the your custom install prefix `$prefix/include`. + **Options:** + * `--include-dirs=`, default: `''` + Path to the headers of preCICE, point to the sources `$PRECICE_ROOT/src`, or the your custom install prefix `$prefix/include`. - **NOTES:** + **NOTES:** - - If you have built preCICE using CMake, you can pass the path to the CMake binary directory using `--library-dirs`. - - It is recommended to use preCICE as a shared library here. + * If you have built preCICE using CMake, you can pass the path to the CMake binary directory using `--library-dirs`. + * It is recommended to use preCICE as a shared library here. 4. Install the bindings - ``` + + ```bash $ python3 setup.py install --user ``` -5. Clean-up _optional_ - ``` +5. Clean-up *optional* + + ```bash $ python3 setup.py clean --all ``` -# Test the installation +## Test the installation Update `LD_LIBRARY_PATH` such that python can find `precice.so` -``` +```bash $ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PRECICE_ROOT/build/last ``` Run the following to test the installation: -``` +```bash $ python3 -c "import precice" ``` -## Unit tests +### Unit tests -1. Clean-up __mandatory__ (because we must not link against the real `precice.so`, but we use a mocked version) - ``` +1. Clean-up **mandatory** (because we must not link against the real `precice.so`, but we use a mocked version) + + ```bash $ python3 setup.py clean --all ``` 2. 
Set `CPLUS_INCLUDE_PATH` (we cannot use `build_ext` and the `--include-dirs` option here) - ``` + + ```bash $ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:$PRECICE_ROOT/src ``` 3. Run tests with - ``` + + ```bash $ python3 setup.py test ``` -# Usage +## Usage You can find the documentation of the implemented interface in the file `precice.pyx`. For an example of how `pyprecice` can be used please refer to the [1D elastic tube example](https://precice.org/tutorials-elastic-tube-1d.html#python). **Note** The python package that is installed is called `pyprecice`. It provides the python module `precice` that can be use in your code via `import precice`, for example. -# Troubleshooting & miscellaneous +## Troubleshooting & miscellaneous ### preCICE is not found The following error shows up during installation, if preCICE is not found: -``` +```bash /tmp/pip-install-d_fjyo1h/pyprecice/precice.cpp:643:10: fatal error: precice/SolverInterface.hpp: No such file or directory 643 | #include "precice/SolverInterface.hpp" | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -178,7 +188,7 @@ In case the compilation fails with `shared_ptr.pxd not found` messages, check if ### `Python.h` missing -``` +```bash $ python3 -m pip install pyprecice Collecting pyprecice ... @@ -194,7 +204,7 @@ Please try to install `python3-dev`. E.g. via `apt install python3-dev`. Please ### `libprecice.so` is not found at runtime -``` +```bash $ python3 -c "import precice" Traceback (most recent call last): File "", line 1, in @@ -216,43 +226,53 @@ If you want to use the old experimental python bindings (released with preCICE v This guide provides steps to install python bindings for precice-1.6.1 for a conda environment Python 2.7.17 on the CoolMUC. Note that preCICE no longer supports Python 2 after v1.4.0. Hence, some modifications to the python setup code was necessary. Most steps are similar if not identical to the basic guide without petsc or python above. 
This guide assumes that the Eigen dependencies have already been installed. Load the prerequisite libraries: -``` + +```bash module load gcc/7 module unload mpi.intel module load mpi.intel/2018_gcc module load cmake/3.12.1 ``` + At the time of this writing `module load boost/1.68.0` is no longer available. Instead boost 1.65.1 was installed per the `boost and yaml-cpp` guide above. In order to have the right python dependencies, a packaged conda environment was transferred to SuperMUC. The following dependencies were installed: -- numpy -- mpi4py + +* numpy +* mpi4py With the python environment active, we have to feed the right python file directories to the cmake command. Note that -DPYTHON_LIBRARY expects a python shared library. You can likely modify the version to fit what is required. -``` + +```bash mkdir build && cd build cmake -DBUILD_SHARED_LIBS=ON -DPRECICE_PETScMapping=OFF -DPRECICE_PythonActions=ON -DCMAKE_INSTALL_PREFIX=/path/to/precice -DCMAKE_BUILD_TYPE=Debug .. -DPYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") -DPYTHON_LIBRARY=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR')+'/libpython2.7.so')") -DNumPy_INCLUDE_DIR=$(python -c "import numpy; print(numpy.get_include())") make -j 12 make install ``` + After installing, make sure you add the preCICE installation paths to your `.bashrc`, so that other programs can find it: -``` + +```bash export PRECICE_ROOT="path/to/precice_install" export PKG_CONFIG_PATH="path/to/precice_install/lib/pkgconfig:${PKG_CONFIG_PATH}" export CPLUS_INCLUDE_PATH="path/to/precice_install/include:${CPLUS_INCLUDE_PATH}" export LD_LIBRARY_PATH="path/to/precice_install/lib:${LD_LIBRARY_PATH}" ``` + Then, navigate to the python_future bindings script. -``` + +```bash cd /path/to/precice/src/precice/bindings/python_future ``` + Append the following to the head of the file to allow Python2 to run Python3 code. 
Note that importing `unicode_literals` from `future` will cause errors in `setuptools` methods as string literals in code are interpreted as `unicode` with this import. -``` + +```python from __future__ import (absolute_import, division, print_function) from builtins import ( @@ -261,14 +281,18 @@ from builtins import ( pow, round, super, filter, map, zip) ``` + Modify `mpicompiler_default = "mpic++"` to `mpicompiler_default = "mpicxx"` in line 100. Run the setup file using the default Python 2.7.17. -``` + +```bash python setup.py install --user ``` + ### ValueError while importing preCICE + If you face the error: ```bash @@ -281,8 +305,7 @@ make sure that you are using an up-to-date version of NumPy. You can update NumP pip3 install numpy --upgrade ``` - -# Contributors +## Contributors * [Benjamin Rodenberg](https://github.com/BenjaminRodenberg) * [Ishaan Desai](https://github.com/IshaanDesai) diff --git a/docs/MigrationGuide.md b/docs/MigrationGuide.md index b0824070..46d7d610 100644 --- a/docs/MigrationGuide.md +++ b/docs/MigrationGuide.md @@ -1,11 +1,10 @@ -Migration Guide for Python language bindings for preCICE version 2.0 ------------------------------------- +# Migration Guide for Python language bindings for preCICE version 2.0 -# Steps to move from old Python API to the new API +## Steps to move from old Python API to the new API ### 1. Python language bindings moved to a new repository in the preCICE Project -Previously, the Python language bindings were part of the repository [`precice/precice`](https://github.com/precice/precice). +Previously, the Python language bindings were part of the repository [`precice/precice`](https://github.com/precice/precice). The bindings have now been moved to the independent repository [`precice/python-bindings`](https://github.com/precice/python-bindings). The installation procedure is the same as before. Please refer to the [README](https://github.com/precice/python-bindings/blob/develop/README.md). 
@@ -16,13 +15,15 @@ The initialization of the `Interface` object now initializes the solver and also file provided by the user. **Old:** Before preCICE Version 2 you had to call: -``` + +```python interface = precice.Interface(solverName, processRank, processSize) interface.configure(configFileName) ``` **New:** The two commands have now been combined into a single one: -``` + +```python interface = precice.Interface(solverName, configFileName, processRank, processSize) ``` @@ -33,14 +34,17 @@ Unlike the old bindings, API calls now do not need the array size to be passed a For example let us consider the call `write_block_vector_data`: **Old:** The previous call was: -``` + +```python interface.write_block_vector_data(writeDataID, writeDataSize, vertexIDs, writeDataArray) ``` **New:** The new function call is: -``` + +```python interface.write_block_vector_data(writeDataID, vertexIDs, writeDataArray) ``` + The same change is applied for all other calls which work with arrays of data. ### 4. API functions use a return value, if appropriate @@ -50,16 +54,20 @@ In older versions of the python bindings arrays were modified by the API in a ca For example let us consider the interface function `set_mesh_vertices`. `set_mesh_vertices` is used to register vertices for a mesh and it returns an array of `vertexIDs`. **Old:** The old signature of this function was: -``` + +```python vertexIDs = np.zeros(numberofVertices) interface.set_mesh_vertices(meshID, numberofVertices, grid, vertexIDs) ``` + Note that `vertexIDs` is passed as an argument to the function. **New:** This has now been changed to: -``` + +```python vertexIDs = interface.set_mesh_vertices(meshID, grid) ``` + Here, `vertexIDs` is directly returned by `set_mesh_vertices`. The same change has been applied to the functions `read_block_scalar_data` and `read_block_vector_data`. 
diff --git a/docs/ReleaseGuide.md b/docs/ReleaseGuide.md index e0c0be86..23c952fa 100644 --- a/docs/ReleaseGuide.md +++ b/docs/ReleaseGuide.md @@ -1,4 +1,5 @@ -## Guide to release new version of python-bindings +# Guide to release new version of python-bindings + The developer who is releasing a new version of the python-bindings is expected to follow this work flow: The release of the `python-bindings` repository is made directly from a release branch called `python-bindings-v2.1.1.1`. This branch is mainly needed to help other developers with testing. @@ -12,7 +13,7 @@ The release of the `python-bindings` repository is made directly from a release * `CHANGELOG.md` on `python-bindings-v2.1.1.1`. * There is no need to bump the version anywhere else, since we use the [python-versioneer](https://github.com/python-versioneer/python-versioneer/) for maintaining the version everywhere else. -4. [Draft a New Release](https://github.com/precice/python-bindings/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing tags](https://github.com/precice/python-bindings/tags)). Use `@target:master`. Release title is also the version number (i.e. `v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing releases](https://github.com/precice/python-bindings/tags)). +4. [Draft a New Release](https://github.com/precice/python-bindings/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing tags](https://github.com/precice/python-bindings/tags)). Use `@target:master`. Release title is also the version number (i.e. `v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing releases](https://github.com/precice/python-bindings/tags)). 
* *Note:* We use the [python-versioneer](https://github.com/python-versioneer/python-versioneer/) for maintaining the version. Therefore the tag directly defines the version in all relevant places. * *Note:* If it is a pre-release then the option *This is a pre-release* needs to be selected at the bottom of the page. Use `@target:python-bindings-v2.1.1.1` for a pre-release, since we will never merge a pre-release into master. @@ -28,23 +29,23 @@ The release of the `python-bindings` repository is made directly from a release 7. Add an empty commit on master via `git checkout master`, then `git commit --allow-empty -m "post-tag bump"`. Check that everything is in order via `git log`. Important: The `tag` and `origin/master` should not point to the same commit. For example: -``` -commit 44b715dde4e3194fa69e61045089ca4ec6925fe3 (HEAD -> master, origin/master) -Author: Benjamin Rodenberg -Date: Wed Oct 20 10:52:41 2021 +0200 + ```bash + commit 44b715dde4e3194fa69e61045089ca4ec6925fe3 (HEAD -> master, origin/master) + Author: Benjamin Rodenberg + Date: Wed Oct 20 10:52:41 2021 +0200 + + post-tag bump - post-tag bump + commit d2645cc51f84ad5eda43b9c673400aada8e1505a (tag: v2.3.0.1) + Merge: 2039557 aca2354 + Author: Benjamin Rodenberg + Date: Tue Oct 19 12:57:24 2021 +0200 -commit d2645cc51f84ad5eda43b9c673400aada8e1505a (tag: v2.3.0.1) -Merge: 2039557 aca2354 -Author: Benjamin Rodenberg -Date: Tue Oct 19 12:57:24 2021 +0200 + Merge pull request #132 from precice/python-bindings-v2.3.0.1 - Merge pull request #132 from precice/python-bindings-v2.3.0.1 - - Release v2.3.0.1 -``` + Release v2.3.0.1 + ``` -For more details refer to https://github.com/precice/python-bindings/issues/109 and https://github.com/python-versioneer/python-versioneer/issues/217. + For more details refer to https://github.com/precice/python-bindings/issues/109 and https://github.com/python-versioneer/python-versioneer/issues/217. 8. Update Spack package (refer to `python-bindings/spack/README.md`). 
diff --git a/examples/solverdummy/README.md b/examples/solverdummy/README.md index db565ada..c1abc86b 100644 --- a/examples/solverdummy/README.md +++ b/examples/solverdummy/README.md @@ -1,15 +1,18 @@ -# Install Dependencies +# Solverdummies + +## Install Dependencies * [preCICE](https://github.com/precice/precice) * [python bindings](https://github.com/precice/python-bindings) * Run in this directory `pip3 install --user -r requirements.txt` -# Run +## Run You can test the dummy solver by coupling two instances with each other. Open two terminals and run - * `python3 solverdummy.py precice-config.xml SolverOne` - * `python3 solverdummy.py precice-config.xml SolverTwo` -# Next Steps +* `python3 solverdummy.py precice-config.xml SolverOne` +* `python3 solverdummy.py precice-config.xml SolverTwo` + +## Next Steps If you want to couple any other solver against this dummy solver be sure to adjust the preCICE configuration (participant names, mesh names, data names etc.) to the needs of your solver, compare our [step-by-step guide for new adapters](https://github.com/precice/precice/wiki/Adapter-Example). From d7a12343eb93bd532f707e960e819bab3d18e51a Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 13 Jun 2023 16:29:04 +0200 Subject: [PATCH 07/33] Compatibility with precice:develop (#179) * Rename SolverInterface to Participant and remove old API functions * Further changes for v3 compatibility * Update to new API. Tests except for gradient mapping are all working. * Fix for empty mesh. 
* Add missing API functions and small corrections * Fix building issue * Reformating test/Participant.cpp * Fixing test_write_block_scalar_gradient_data_single_float * Fix gradient tests * Import getVersionInformation in the correct way * initialize() does not return anything * Remove merge conflict tag * Mistake in resolving merge conflicts --------- Co-authored-by: Benjamin Rodenberg --- .github/workflows/build-and-test.yml | 2 +- cyprecice/Participant.pxd | 92 +++ cyprecice/SolverInterface.pxd | 114 ---- cyprecice/cyprecice.pxd | 6 +- cyprecice/cyprecice.pyx | 927 ++++++++------------------- examples/solverdummy/solverdummy.py | 40 +- precice/__init__.py | 9 +- setup.py | 2 +- test/Participant.cpp | 338 ++++++++++ test/SolverInterface.cpp | 465 -------------- test/test_bindings_module.py | 338 +++++----- 11 files changed, 920 insertions(+), 1413 deletions(-) create mode 100644 cyprecice/Participant.pxd delete mode 100644 cyprecice/SolverInterface.pxd create mode 100644 test/Participant.cpp delete mode 100644 test/SolverInterface.cpp diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index ff85c57c..2dbbf1a9 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -96,7 +96,7 @@ jobs: run: | git clone --branch develop https://github.com/precice/precice.git precice-core mkdir -p precice - cp precice-core/src/precice/SolverInterface.hpp precice/SolverInterface.hpp + cp precice-core/src/precice/Participant.hpp precice/Participant.hpp cp precice-core/src/precice/Tooling.hpp precice/Tooling.hpp cp precice-core/src/precice/Tooling.hpp precice/Tooling.cpp cd precice-core diff --git a/cyprecice/Participant.pxd b/cyprecice/Participant.pxd new file mode 100644 index 00000000..355237af --- /dev/null +++ b/cyprecice/Participant.pxd @@ -0,0 +1,92 @@ +from libcpp cimport bool +from libcpp.set cimport set +from libcpp.string cimport string +from libcpp.vector cimport vector + +cdef extern from 
"precice/Participant.hpp" namespace "precice": + cdef cppclass Participant: + # construction and configuration + + Participant (const string&, const string&, int, int) except + + + Participant (const string&, const string&, int, int, void*) except + + + void configure (const string&) + + # steering methods + + void initialize () + + void advance (double computedTimestepLength) + + void finalize() + + # status queries + + int getMeshDimensions(const string& meshName) const + + int getDataDimensions(const string& meshName, const string& dataName) const + + bool isCouplingOngoing() const + + bool isTimeWindowComplete() const + + double getMaxTimeStepSize() const + + bool requiresInitialData() + + bool requiresWritingCheckpoint() + + bool requiresReadingCheckpoint() + + # mesh access + + bool hasMesh (const string& meshName) const + + bool requiresMeshConnectivityFor (const string& meshName) const + + int setMeshVertex (const string& meshName, vector[double] position) + + int getMeshVertexSize (const string& meshName) const + + void setMeshVertices (const string& meshName, vector[double] positions, vector[int]& ids) + + void setMeshEdge (const string& meshName, int firstVertexID, int secondVertexID) + + void setMeshEdges (const string& meshName, vector[int] vertices) + + void setMeshTriangle (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID) + + void setMeshTriangles (const string& meshName, vector[int] vertices) + + void setMeshQuad (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID, int fourthVertexID) + + void setMeshQuads (const string& meshName, vector[int] vertices) + + void setMeshTetrahedron (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID, int fourthVertexID) + + void setMeshTetrahedra (const string& meshName, vector[int] vertices) + + # data access + + bool hasData (const string& dataName, const string& meshName) const + + void writeData (const string& meshName, 
const string& dataName, vector[int] vertices, vector[double] values) + + void readData (const string& meshName, const string& dataName, vector[int] vertices, const double relativeReadTime, vector[double]& values) const + + # direct access + + void setMeshAccessRegion (const string& meshName, vector[double] boundingBox) const + + void getMeshVerticesAndIDs (const string& meshName, vector[int]& ids, vector[double]& coordinates) const + + # Gradient related API + + bool requiresGradientDataFor(const string& meshName, const string& dataName) const + + void writeGradientData(const string& meshName, const string& dataName, vector[int] vertices, vector[double] gradientValues) + + +cdef extern from "precice/Tooling.hpp" namespace "precice": + string getVersionInformation() diff --git a/cyprecice/SolverInterface.pxd b/cyprecice/SolverInterface.pxd deleted file mode 100644 index e5282f68..00000000 --- a/cyprecice/SolverInterface.pxd +++ /dev/null @@ -1,114 +0,0 @@ -from libcpp cimport bool -from libcpp.set cimport set -from libcpp.string cimport string - -cdef extern from "" namespace "std": - cdef cppclass string_view: - string_view() except + - string_view(const string&) except + # necessary to cast Python strings to string_view before handing over to C++ API - - -cdef extern from "precice/SolverInterface.hpp" namespace "precice": - cdef cppclass SolverInterface: - # construction and configuration - - SolverInterface (const string&, const string&, int, int) except + - - SolverInterface (const string&, const string&, int, int, void*) except + - - void configure (const string&) - - # steering methods - - double initialize () - - double advance (double computedTimestepLength) - - void finalize() - - # status queries - - int getDimensions() const - - bool isCouplingOngoing() const - - bool isTimeWindowComplete() const - - bool requiresInitialData() - - bool requiresReadingCheckpoint() - - bool requiresWritingCheckpoint() - - # mesh access - - bool hasMesh (const char* 
meshName) const - - bool requiresMeshConnectivityFor (const char* meshName) const - - int setMeshVertex (const char* meshName, const double* position) - - int getMeshVertexSize (const char* meshName) const - - void setMeshVertices (const char* meshName, int size, const double* positions, int* ids) - - void setMeshEdge (const char* meshName, int firstVertexID, int secondVertexID) - - void setMeshEdges (const char* meshName, int size, const int* vertices) - - void setMeshTriangle (const char* meshName, int firstVertexID, int secondVertexID, int thirdVertexID) - - void setMeshTriangles (const char* meshName, int size, const int* vertices) - - void setMeshQuad (const char* meshName, int firstVertexID, int secondVertexID, int thirdVertexID, int fourthVertexID) - - void setMeshQuads (const char* meshName, int size, const int* vertices) - - # data access - - bool hasData (const char* dataName, const char* meshName) const - - void writeBlockVectorData (const char* meshName, const char* dataName, const int size, const int* valueIndices, const double* values) - - void writeVectorData (const char* meshName, const char* dataName, const int valueIndex, const double* value) - - void writeBlockScalarData (const char* meshName, const char* dataName, const int size, const int* valueIndices, const double* values) - - void writeScalarData (const char* meshName, const char* dataName, const int valueIndex, const double value) - - void readBlockVectorData (const char* meshName, const char* dataName, const int size, const int* valueIndices, double* values) const - - void readBlockVectorData (const char* meshName, const char* dataName, const int size, const int* valueIndices, double relativeReadTime, double* values) const - - void readVectorData (const char* meshName, const char* dataName, const int valueIndex, double* value) const - - void readVectorData (const char* meshName, const char* dataName, const int valueIndex, double relativeReadTime, double* value) const - - void 
readBlockScalarData (const char* meshName, const char* dataName, const int size, const int* valueIndices, double* values) const - - void readBlockScalarData (const char* meshName, const char* dataName, const int size, const int* valueIndices, double relativeReadTime, double* values) const - - void readScalarData (const char* meshName, const char* dataName, const int valueIndex, double& value) const - - void readScalarData (const char* meshName, const char* dataName, const int valueIndex, double relativeReadTime, double& value) const - - # Gradient related API - - bool requiresGradientDataFor(const char* meshName, const char* dataName) const - - void writeBlockVectorGradientData(const char* meshName, const char* dataName, int size, const int* valueIndices, const double* gradientValues) - - void writeScalarGradientData(const char* meshName, const char* dataName, int valueIndex, const double* gradientValues) - - void writeVectorGradientData(const char* meshName, const char* dataName, int valueIndex, const double* gradientValues) - - void writeBlockScalarGradientData(const char* meshName, const char* dataName, int size, const int* valueIndices, const double* gradientValues) - - # direct mesh access - - void setMeshAccessRegion (const char* meshName, const double* boundingBox) const - - void getMeshVerticesAndIDs (const char* meshName, const int size, int* ids, double* coordinates) const - -cdef extern from "precice/Tooling.hpp" namespace "precice": - string getVersionInformation() diff --git a/cyprecice/cyprecice.pxd b/cyprecice/cyprecice.pxd index 0e6953db..32e819e4 100644 --- a/cyprecice/cyprecice.pxd +++ b/cyprecice/cyprecice.pxd @@ -5,10 +5,10 @@ The python module precice offers python language bindings to the C++ coupling li cimport numpy as np cimport cython -cimport SolverInterface +cimport Participant as CppParticipant from cpython.version cimport PY_MAJOR_VERSION # important for determining python version in order to properly normalize string input. 
See http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings and https://github.com/precice/precice/issues/68 . @cython.embedsignature(True) -cdef class Interface: - cdef SolverInterface.SolverInterface *thisptr # hold a C++ instance being wrapped +cdef class Participant: + cdef CppParticipant.Participant *thisptr # hold a C++ instance being wrapped diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 52289a5f..0eaf1cb0 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -10,14 +10,10 @@ import numpy as np from mpi4py import MPI import warnings from libcpp.string cimport string +from libcpp.vector cimport vector from cpython.version cimport PY_MAJOR_VERSION # important for determining python version in order to properly normalize string input. See http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings and https://github.com/precice/precice/issues/68 . -cdef extern from "" namespace "std": - cdef cppclass string_view: - string_view() except + - string_view(const string&) except + # necessary to cast Python strings to string_view before handing over to C++ API - cdef bytes convert(s): """ source code from http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings @@ -38,15 +34,14 @@ def check_array_like(argument, argument_name, function_name): raise TypeError("{} requires array_like input for {}, but was provided the following input type: {}".format( function_name, argument_name, type(argument))) from None -cdef class Interface: +cdef class Participant: """ Main Application Programming Interface of preCICE. 
To adapt a solver to preCICE, follow the following main structure: - - Create an object of SolverInterface with Interface() - - Configure the object with Interface::configure() - - Initialize preCICE with Interface::initialize() - - Advance to the next (time)step with Interface::advance() - - Finalize preCICE with Interface::finalize() + - Create an object of Participant with Participant() + - Initialize preCICE with Participant::initialize() + - Advance to the next (time)step with Participant::advance() + - Finalize preCICE with Participant::finalize() - We use solver, simulation code, and participant as synonyms. - The preferred name in the documentation is participant. """ @@ -54,7 +49,7 @@ cdef class Interface: # fake __init__ needed to display docstring for __cinit__ (see https://stackoverflow.com/a/42733794/5158031) def __init__(self, solver_name, configuration_file_name, solver_process_index, solver_process_size, communicator=None): """ - Constructor of Interface class. + Constructor of Participant class. 
Parameters ---------- @@ -71,12 +66,12 @@ cdef class Interface: Returns ------- - SolverInterface : object - Object pointing to the defined coupling interface + Participant : object + Object pointing to the defined participant Example ------- - >>> interface = precice.Interface("SolverOne", "precice-config.xml", 0, 1) + >>> participant = precice.Participant("SolverOne", "precice-config.xml", 0, 1) preCICE: This is preCICE version X.X.X preCICE: Revision info: vX.X.X-X-XXXXXXXXX preCICE: Configuring preCICE with configuration: "precice-config.xml" @@ -88,17 +83,18 @@ cdef class Interface: cdef void* communicator_ptr if communicator: communicator_ptr = communicator - self.thisptr = new SolverInterface.SolverInterface (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size, communicator_ptr) + self.thisptr = new CppParticipant.Participant (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size, communicator_ptr) else: - self.thisptr = new SolverInterface.SolverInterface (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size) + self.thisptr = new CppParticipant.Participant (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size) pass def __dealloc__ (self): """ - Destructor of Interface class + Destructor of Participant class """ del self.thisptr + # steering methods def initialize (self): @@ -118,7 +114,7 @@ cdef class Interface: max_timestep : double Maximum length of first timestep to be computed by the solver. """ - return self.thisptr.initialize () + self.thisptr.initialize () def advance (self, double computed_timestep_length): @@ -130,11 +126,6 @@ cdef class Interface: computed_timestep_length : double Length of timestep used by the solver. - Returns - ------- - max_timestep : double - Maximum length of next timestep to be computed by solver. 
- Notes ----- Previous calls: @@ -151,7 +142,7 @@ cdef class Interface: [Second Participant] Configured post processing schemes are applied. Meshes with data are exported to files if configured. """ - return self.thisptr.advance (computed_timestep_length) + self.thisptr.advance (computed_timestep_length) def finalize (self): @@ -169,19 +160,45 @@ cdef class Interface: """ self.thisptr.finalize () + # status queries - def get_dimensions (self): + def get_mesh_dimensions (self, mesh_name): """ - Returns the number of spatial dimensions configured. Currently, two and three dimensional problems - can be solved using preCICE. The dimension is specified in the XML configuration. + Returns the spatial dimensionality of the given mesh. + + Parameters + ---------- + mesh_name : string + Name of the mesh. Returns ------- dimension : int - The configured dimension. + The dimensions of the given mesh. + """ + + return self.thisptr.getMeshDimensions (convert(mesh_name)) + + + def get_data_dimensions (self, mesh_name, data_name): """ - return self.thisptr.getDimensions () + Returns the spatial dimensionality of the given data on the given mesh. + + Parameters + ---------- + mesh_name : string + Name of the mesh. + data_name : string + Name of the data. + + Returns + ------- + dimension : int + The dimensions of the given data. + """ + + return self.thisptr.getDataDimensions (convert(mesh_name), convert(data_name)) def is_coupling_ongoing (self): @@ -224,6 +241,27 @@ cdef class Interface: """ return self.thisptr.isTimeWindowComplete () + + def get_max_time_step_size (self): + """ + Get the maximum allowed time step size of the current window. + + Allows the user to query the maximum allowed time step size in the current window. + This should be used to compute the actual time step that the solver uses. + + Returns + ------- + tag : double + Maximum size of time step to be computed by solver. + + Notes + ----- + Previous calls: + initialize() has been called successfully. 
+ """ + return self.thisptr.getMaxTimeStepSize () + + def requires_initial_data (self): """ Checks if the participant is required to provide initial data. @@ -262,7 +300,7 @@ cdef class Interface: def requires_reading_checkpoint (self): """ Checks if the participant is required to read an iteration checkpoint. - + If true, the participant is required to read an iteration checkpoint before calling advance(). @@ -294,15 +332,12 @@ cdef class Interface: tag : bool Returns true is the mesh is used. """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() + return self.thisptr.hasMesh (convert(mesh_name)) - return self.thisptr.hasMesh ( mesh_name_py_bytes) - - def get_mesh_handle(self, mesh_name): + def requires_mesh_connectivity_for (self, mesh_name): """ - Returns a handle to a created mesh. - WARNING: This function is not yet available for the Python bindings + Checks if the given mesh requires connectivity. Parameters ---------- @@ -311,10 +346,10 @@ cdef class Interface: Returns ------- - tag : object - Handle to the mesh. + tag : bool + True if mesh connectivity is required. """ - raise Exception("The API method get_mesh_handle is not yet available for the Python bindings.") + return self.thisptr.requiresMeshConnectivityFor(convert(mesh_name)) def set_mesh_vertex(self, mesh_name, position): @@ -342,17 +377,17 @@ cdef class Interface: if len(position) > 0: dimensions = len(position) - assert dimensions == self.get_dimensions(), "Dimensions of vertex coordinate in set_mesh_vertex does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) + assert dimensions == self.get_mesh_dimensions(mesh_name), "Dimensions of vertex coordinate in set_mesh_vertex does not match with dimensions in problem definition. 
Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name)) elif len(position) == 0: - dimensions = self.get_dimensions() + dimensions = self.get_mesh_dimensions(mesh_name) - cdef np.ndarray[double, ndim=1] _position = np.ascontiguousarray(position, dtype=np.double) + cdef vector[double] cpp_position = position - cdef bytes mesh_name_py_bytes = mesh_name.encode() + vertex_id = self.thisptr.setMeshVertex(convert(mesh_name), cpp_position) - vertex_id = self.thisptr.setMeshVertex( mesh_name_py_bytes, _position.data) return vertex_id + def get_mesh_vertex_size (self, mesh_name): """ Returns the number of vertices of a mesh @@ -367,9 +402,9 @@ cdef class Interface: sum : int Number of vertices of the mesh. """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - return self.thisptr.getMeshVertexSize( mesh_name_py_bytes) + return self.thisptr.getMeshVertexSize(convert(mesh_name)) + def set_mesh_vertices (self, mesh_name, positions): """ @@ -403,7 +438,7 @@ cdef class Interface: >>> positions.shape (5, 2) >>> mesh_name = "MeshOne" - >>> vertex_ids = interface.set_mesh_vertices(mesh_name, positions) + >>> vertex_ids = participant.set_mesh_vertices(mesh_name, positions) >>> vertex_ids.shape (5,) @@ -413,7 +448,7 @@ cdef class Interface: >>> positions.shape (5, 3) >>> mesh_name = "MeshOne" - >>> vertex_ids = interface.set_mesh_vertices(mesh_name, positions) + >>> vertex_ids = participant.set_mesh_vertices(mesh_name, positions) >>> vertex_ids.shape (5,) """ @@ -424,19 +459,20 @@ cdef class Interface: if len(positions) > 0: size, dimensions = positions.shape - assert dimensions == self.get_dimensions(), "Dimensions of vertex coordinates in set_mesh_vertices does not match with dimensions in problem definition. 
Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) + assert dimensions == self.get_mesh_dimensions(mesh_name), "Dimensions of vertex coordinates in set_mesh_vertices does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name)) elif len(positions) == 0: - size = positions.shape[0] - dimensions = self.get_dimensions() + size = 0 + dimensions = self.get_mesh_dimensions(mesh_name) + + cdef vector[double] cpp_positions = positions.flatten() + cdef vector[int] cpp_ids = [-1 for _ in range(size)] + + self.thisptr.setMeshVertices (convert(mesh_name), cpp_positions, cpp_ids) - cdef np.ndarray[double, ndim=1] _positions = np.ascontiguousarray(positions.flatten(), dtype=np.double) - cdef np.ndarray[int, ndim=1] vertex_ids = np.empty(size, dtype=np.int32) + cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32) - cdef bytes mesh_name_py_bytes = mesh_name.encode() - #cdef str mesh_name_str = mesh_name.encode() + return np_ids - self.thisptr.setMeshVertices ( mesh_name_py_bytes, size, _positions.data, vertex_ids.data) - return vertex_ids def set_mesh_edge (self, mesh_name, first_vertex_id, second_vertex_id): """ @@ -461,9 +497,9 @@ cdef class Interface: Previous calls: vertices with firstVertexID and secondVertexID were added to the mesh with name mesh_name """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - self.thisptr.setMeshEdge ( mesh_name_py_bytes, first_vertex_id, second_vertex_id) + self.thisptr.setMeshEdge (convert(mesh_name), first_vertex_id, second_vertex_id) + def set_mesh_edges (self, mesh_name, vertices): """ @@ -484,7 +520,7 @@ cdef class Interface: >>> vertices = np.array([[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]) >>> vertices.shape (6, 2) - >>> interface.set_mesh_edges(mesh_name, vertices) + >>> participant.set_mesh_edges(mesh_name, vertices) """ check_array_like(vertices, "vertices", 
"set_mesh_edges") @@ -492,17 +528,15 @@ cdef class Interface: vertices = np.asarray(vertices) if len(vertices) > 0: - size, n = vertices.shape + _, n = vertices.shape assert n == 2, "Provided vertices are not of a [N x 2] format, but instead of a [N x {}]".format(n) elif len(vertices) == 0: - size = vertices.shape[0] - dimensions = self.get_dimensions() + dimensions = self.get_mesh_dimensions(mesh_name) - cdef np.ndarray[double, ndim=1] _vertices = np.ascontiguousarray(vertices.flatten(), dtype=np.int) + cdef vector[int] cpp_vertices = vertices.flatten() - cdef bytes mesh_name_py_bytes = mesh_name.encode() + self.thisptr.setMeshEdges (convert(mesh_name), cpp_vertices) - self.thisptr.setMeshEdges ( mesh_name_py_bytes, size, _vertices.data) def set_mesh_triangle (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id): """ @@ -524,9 +558,9 @@ cdef class Interface: Previous calls: vertices with first_vertex_id, second_vertex_id, and third_vertex_id were added to the mesh with the name mesh_name """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - self.thisptr.setMeshTriangle ( mesh_name_py_bytes, first_vertex_id, second_vertex_id, third_vertex_id) + self.thisptr.setMeshTriangle (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id) + def set_mesh_triangles (self, mesh_name, vertices): """ @@ -535,7 +569,7 @@ cdef class Interface: Parameters ---------- mesh_name : str - Name of the mesh to add the vertices to. + Name of the mesh to add the triangles to. vertices : array_like The IDs of the vertices in a numpy array [N x 3] where N = number of triangles and D = dimensions of geometry. 
@@ -547,7 +581,7 @@ cdef class Interface: >>> vertices = np.array([[1, 2, 3], [1, 3, 4], [1, 2, 4], [1, 3, 4]]) >>> vertices.shape (4, 2) - >>> interface.set_mesh_triangles(mesh_name, vertices) + >>> participant.set_mesh_triangles(mesh_name, vertices) """ check_array_like(vertices, "vertices", "set_mesh_triangles") @@ -555,17 +589,15 @@ cdef class Interface: vertices = np.asarray(vertices) if len(vertices) > 0: - size, n = vertices.shape - assert n == 3, "Provided vertices are not of a [N x 3] format, but instead of a [N x {}]".format(n) + _, n = vertices.shape + assert n == self.get_mesh_dimensions(mesh_name), "Provided vertices are not of a [N x {}] format, but instead of a [N x {}]".format(self.get_mesh_dimensions(mesh_name), n) elif len(vertices) == 0: - size = vertices.shape[0] - dimensions = self.get_dimensions() + dimensions = self.get_mesh_dimensions(mesh_name) - cdef np.ndarray[double, ndim=1] _vertices = np.ascontiguousarray(vertices.flatten(), dtype=np.int) + cdef vector[int] cpp_vertices = vertices.flatten() + + self.thisptr.setMeshTriangles (convert(mesh_name), cpp_vertices) - cdef bytes mesh_name_py_bytes = mesh_name.encode() - - self.thisptr.setMeshTriangles ( mesh_name_py_bytes, size, _vertices.data) def set_mesh_quad (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id): """ @@ -574,7 +606,7 @@ cdef class Interface: Parameters ---------- mesh_name : str - Name of the mesh to add the Quad to. + Name of the mesh to add the quad to. first_vertex_id : int ID of the first vertex of the quad. 
second_vertex_id : int @@ -590,9 +622,9 @@ cdef class Interface: vertices with first_vertex_id, second_vertex_id, third_vertex_id, and fourth_vertex_id were added to the mesh with the name mesh_name """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - - self.thisptr.setMeshQuad ( mesh_name_py_bytes, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id) + + self.thisptr.setMeshQuad (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id) + def set_mesh_quads (self, mesh_name, vertices): """ @@ -601,7 +633,7 @@ cdef class Interface: Parameters ---------- mesh_name : str - Name of the mesh to add the vertices to. + Name of the mesh to add the quads to. vertices : array_like The IDs of the vertices in a numpy array [N x 4] where N = number of quads and D = dimensions of geometry. @@ -613,7 +645,7 @@ cdef class Interface: >>> vertices = np.array([[1, 2, 3, 4]]) >>> vertices.shape (1, 2) - >>> interface.set_mesh_quads(mesh_name, vertices) + >>> participant.set_mesh_quads(mesh_name, vertices) """ check_array_like(vertices, "vertices", "set_mesh_quads") @@ -621,36 +653,80 @@ cdef class Interface: vertices = np.asarray(vertices) if len(vertices) > 0: - size, n = vertices.shape + _, n = vertices.shape assert n == 4, "Provided vertices are not of a [N x 4] format, but instead of a [N x {}]".format(n) elif len(vertices) == 0: - size = vertices.shape[0] - dimensions = self.get_dimensions() + dimensions = self.get_mesh_dimensions(mesh_name) - cdef np.ndarray[double, ndim=1] _vertices = np.ascontiguousarray(vertices.flatten(), dtype=np.int) + cdef vector[int] cpp_vertices = vertices.flatten() - cdef bytes mesh_name_py_bytes = mesh_name.encode() + self.thisptr.setMeshQuads (convert(mesh_name), cpp_vertices) - self.thisptr.setMeshQuads ( mesh_name_py_bytes, size, _vertices.data) - # data access + def set_mesh_tetrahedron (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id): + """ + Sets a 
mesh tetrahedron from vertex IDs. - def requires_mesh_connectivity_for (self, mesh_name): + Parameters + ---------- + mesh_name : str + Name of the mesh to add the tetrahedron to. + first_vertex_id : int + ID of the first vertex of the tetrahedron. + second_vertex_id : int + ID of the second vertex of the tetrahedron. + third_vertex_id : int + ID of the third vertex of the tetrahedron. + fourth_vertex_id : int + ID of the third vertex of the tetrahedron. + + Notes + ----- + Previous calls: + vertices with first_vertex_id, second_vertex_id, third_vertex_id, and fourth_vertex_id were added + to the mesh with the name mesh_name """ - Checks if the given mesh requires connectivity. + + self.thisptr.setMeshTetrahedron (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id) + + + def set_mesh_tetrahedra (self, mesh_name, vertices): + """ + Creates multiple mesh tetdrahedrons Parameters ---------- mesh_name : str - ID of the associated mesh. + Name of the mesh to add the tetrahedrons to. + vertices : array_like + The IDs of the vertices in a numpy array [N x 4] where + N = number of quads and D = dimensions of geometry. - Returns - ------- - tag : bool - True if mesh connectivity is required. + Examples + -------- + Set mesh tetrahedrons for a problem with 4 mesh vertices. 
+ + >>> vertices = np.array([[1, 2, 3, 4]]) + >>> vertices.shape + (1, 2) + >>> participant.set_mesh_tetradehra(mesh_name, vertices) """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - return self.thisptr.requiresMeshConnectivityFor( mesh_name_py_bytes) + check_array_like(vertices, "vertices", "set_mesh_tetrahedra") + + if not isinstance(vertices, np.ndarray): + vertices = np.asarray(vertices) + + if len(vertices) > 0: + _, n = vertices.shape + assert n == 4, "Provided vertices are not of a [N x 4] format, but instead of a [N x {}]".format(n) + elif len(vertices) == 0: + dimensions = self.get_mesh_dimensions(mesh_name) + + cdef vector[int] cpp_vertices = vertices.flatten() + + self.thisptr.setMeshTetrahedra (convert(mesh_name), cpp_vertices) + + # data access def has_data (self, data_name, mesh_name): """ @@ -668,15 +744,14 @@ cdef class Interface: tag : bool True if the mesh is already used. """ - cdef bytes data_name_py_bytes = data_name.encode() - cdef bytes mesh_name_py_bytes = mesh_name.encode() - return self.thisptr.hasData( data_name_py_bytes, mesh_name_py_bytes) + return self.thisptr.hasData(convert(data_name), convert(mesh_name)) + - def write_block_vector_data (self, mesh_name, data_name, vertex_ids, values): + def write_data (self, mesh_name, data_name, vertex_ids, values): """ - Writes vector data given as block. This function writes values of specified vertices to a dataID. - Values are provided as a block of continuous memory. Values are stored in a numpy array [N x D] where N = number - of vertices and D = dimensions of geometry + This function writes values of specified vertices to data of a mesh. + Values are provided as a block of continuous memory defined by values. Values are stored in a numpy array [N x D] where N = number of vertices and D = dimensions of geometry. + The order of the provided data follows the order specified by vertices. 
Parameters ---------- @@ -687,7 +762,7 @@ cdef class Interface: vertex_ids : array_like Indices of the vertices. values : array_like - Vector values of data + Values of data Notes ----- @@ -698,186 +773,55 @@ cdef class Interface: Examples -------- - Write block vector data for a 2D problem with 5 vertices: + Write scalar data for a 2D problem with 5 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = np.array([[v1_x, v1_y], [v2_x, v2_y], [v3_x, v3_y], [v4_x, v4_y], [v5_x, v5_y]]) - >>> interface.write_block_vector_data(mesh_name, data_name, vertex_ids, values) - - Write block vector data for a 3D (D=3) problem with 5 (N=5) vertices: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = np.array([[v1_x, v1_y, v1_z], [v2_x, v2_y, v2_z], [v3_x, v3_y, v3_z], [v4_x, v4_y, v4_z], [v5_x, v5_y, v5_z]]) - >>> interface.write_block_vector_data(mesh_name, data_name, vertex_ids, values) - """ - check_array_like(vertex_ids, "vertex_ids", "write_block_vector_data") - check_array_like(values, "values", "write_block_vector_data") - - if not isinstance(values, np.ndarray): - values = np.asarray(values) - - if len(values) > 0: - size, dimensions = values.shape - assert dimensions == self.get_dimensions(), "Dimensions of vector data in write_block_vector_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) - if len(values) == 0: - size = 0 - - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values.flatten(), dtype=np.double) - - assert _values.size == size * self.get_dimensions(), "Vector data is not provided for all vertices in write_block_vector_data. Check length of input data provided. 
Provided size: {}, expected size: {}".format(_values.size, size * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeBlockVectorData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _values.data) - - def write_vector_data (self, mesh_name, data_name, vertex_id, value): - """ - Writes vector data to a vertex. This function writes a value of a specified vertex to a dataID. - Values are provided as a block of continuous memory. - The 2D-format of value is a numpy array of shape 2 - The 3D-format of value is a numpy array of shape 3 - - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. - data_name : str - ID to write to. - vertex_id : int - Index of the vertex. 
- value : array_like - Single vector value - - Notes - ----- - Previous calls: - count of available elements at value matches the configured dimension - initialize() has been called + >>> values = np.array([v1, v2, v3, v4, v5]) + >>> participant.write_data(mesh_name, data_name, vertex_ids, values) - Examples - -------- Write vector data for a 2D problem with 5 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = np.array([v5_x, v5_y]) - >>> interface.write_vector_data(mesh_name, data_name, vertex_id, value) + >>> vertex_ids = [1, 2, 3, 4, 5] + >>> values = np.array([[v1_x, v1_y], [v2_x, v2_y], [v3_x, v3_y], [v4_x, v4_y], [v5_x, v5_y]]) + >>> participant.write_data(mesh_name, data_name, vertex_ids, values) Write vector data for a 3D (D=3) problem with 5 (N=5) vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = np.array([v5_x, v5_y, v5_z]) - >>> interface.write_vector_data(mesh_name, data_name, vertex_id, value) - """ - check_array_like(value, "value", "write_vector_data") - assert len(value) > 0, "Input vector data is empty in write_vector_data" - - dimensions = len(value) - - assert dimensions == self.get_dimensions(), "Dimensions of vector data in write_vector_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) - - cdef np.ndarray[np.double_t, ndim=1] _value = np.ascontiguousarray(value, dtype=np.double) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeVectorData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, _value.data) - - def write_block_scalar_data (self, mesh_name, data_name, vertex_ids, values): - """ - Writes scalar data given as a block. This function writes values of specified vertices to a dataID. - - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. 
- data_name : str - ID to write to. - vertex_ids : array_like - Indices of the vertices. - values : array_like - Values to be written - - Notes - ----- - Previous calls: - count of available elements at values matches the given size - count of available elements at vertex_ids matches the given size - initialize() has been called - - Examples - -------- - Write block scalar data for a 2D and 3D problem with 5 (N=5) vertices: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = np.array([v1, v2, v3, v4, v5]) - >>> interface.write_block_scalar_data(mesh_name, data_name, vertex_ids, values) + >>> values = np.array([[v1_x, v1_y, v1_z], [v2_x, v2_y, v2_z], [v3_x, v3_y, v3_z], [v4_x, v4_y, v4_z], [v5_x, v5_y, v5_z]]) + >>> participant.write_data(mesh_name, data_name, vertex_ids, values) """ - check_array_like(vertex_ids, "vertex_ids", "write_block_scalar_data") - check_array_like(values, "values", "write_block_scalar_data") + check_array_like(vertex_ids, "vertex_ids", "write_data") + check_array_like(values, "values", "write_data") + + if not isinstance(values, np.ndarray): + values = np.asarray(values) - if len(values) > 0: - assert(len(vertex_ids) == len(values)) - size = len(vertex_ids) if len(values) == 0: size = 0 + elif self.get_data_dimensions(mesh_name, data_name) == 1: + size = values.flatten().shape[0] + dimensions = 1 + else: + assert len(values.shape) == 2, "Vector valued data has to be provided as a numpy array of shape [N x D] where N = number of vertices and D = number of dimensions." + size, dimensions = values.shape - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values, dtype=np.double) + assert dimensions == self.get_data_dimensions(mesh_name, data_name), "Dimensions of vector data in write_data do not match with dimensions in problem definition. 
Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_data_dimensions(mesh_name, data_name)) - assert _values.size == size, "Scalar data is not provided for all vertices in write_block_scalar_data. Check size of input data provided. Provided size: {}, expected size: {}".format(_values.size, size) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_data. Check size of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + assert len(vertex_ids) == size, "Vertex IDs are of incorrect length in write_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids.size, size) - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() + cdef vector[int] cpp_ids = vertex_ids + cdef vector[double] cpp_values = values.flatten() - self.thisptr.writeBlockScalarData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _values.data) + self.thisptr.writeData (convert(mesh_name), convert(data_name), cpp_ids, cpp_values) - def write_scalar_data (self, mesh_name, data_name, vertex_id, double value): - """ - Writes scalar data to a vertex - This function writes a value of a specified vertex to a dataID. - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. - data_name : str - ID to write to. - vertex_id : int - Index of the vertex. - value : double - The value to write. 
- - Notes - ----- - Previous calls: - initialize() has been called - - Examples - -------- - Write scalar data for a 2D or 3D problem with 5 vertices: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = v5 - >>> interface.write_scalar_data(mesh_name, data_name, vertex_id, value) + def read_data (self, mesh_name, data_name, vertex_ids, relative_read_time): """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeScalarData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, value) - - def read_block_vector_data (self, mesh_name, data_name, vertex_ids, relative_read_time=None): - """ - Reads vector data into a provided block. This function reads values of specified vertices + Reads data into a provided block. This function reads values of specified vertices from a dataID. Values are read into a block of continuous memory. Parameters @@ -905,202 +849,60 @@ cdef class Interface: Examples -------- - Read block vector data for a 2D problem with 5 vertices: + Read scalar data for a 2D problem with 5 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = read_block_vector_data(mesh_name, data_name, vertex_ids) + >>> values = read_data(mesh_name, data_name, vertex_ids) >>> values.shape - >>> (5, 2) + >>> (5, ) - Read block vector data for a 3D system with 5 vertices: + Read vector data for a 2D problem with 5 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = read_block_vector_data(mesh_name, data_name, vertex_ids) + >>> values = read_data(mesh_name, data_name, vertex_ids) >>> values.shape - >>> (5, 3) - """ - check_array_like(vertex_ids, "vertex_ids", "read_block_vector_data") - - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = _vertex_ids.size - dimensions = self.get_dimensions() - cdef 
np.ndarray[np.double_t, ndim=1] _values = np.empty(size * dimensions, dtype=np.double) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - #cdef str mesh_name_str = mesh_name.encode() - #cdef str data_name_str = data_name.encode() - - if relative_read_time is None: - self.thisptr.readBlockVectorData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _values.data) - else: - self.thisptr.readBlockVectorData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, relative_read_time, _values.data) - return _values.reshape((size, dimensions)) - - def read_vector_data (self, mesh_name, data_name, vertex_id, relative_read_time=None): - """ - Reads vector data form a vertex. This function reads a value of a specified vertex - from a dataID. - - Parameters - ---------- - mesh_name : str - Name of the mesh to read from. - data_name : str - ID to read from. - vertex_id : int - Index of the vertex. - relative_read_time : double - Point in time where data is read relative to the beginning of the current time step - - Returns - ------- - value : numpy.ndarray - Contains the read data. 
- - Notes - ----- - Previous calls: - count of available elements at value matches the configured dimension - initialize() has been called - - Examples - -------- - Read vector data for 2D problem: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = interface.read_vector_data(mesh_name, data_name, vertex_id) - >>> value.shape - (1, 2) - - Read vector data for 3D problem: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = interface.read_vector_data(mesh_name, data_name, vertex_id) - >>> value.shape - (1, 3) - """ - dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _value = np.empty(dimensions, dtype=np.double) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - if relative_read_time is None: - self.thisptr.readVectorData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, _value.data) - else: - self.thisptr.readVectorData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, relative_read_time, _value.data) - - return _value - - def read_block_scalar_data (self, mesh_name, data_name, vertex_ids, relative_read_time=None): - """ - Reads scalar data as a block. This function reads values of specified vertices from a dataID. - Values are provided as a block of continuous memory. - - Parameters - ---------- - mesh_name : str - Name of the mesh to read from. - data_name : str - ID to read from. - vertex_ids : array_like - Indices of the vertices. - relative_read_time : double - Point in time where data is read relative to the beginning of the current time step - - Returns - ------- - values : numpy.ndarray - Contains the read data. 
- - Notes - ----- - Previous calls: - count of available elements at values matches the given size - count of available elements at vertex_ids matches the given size - initialize() has been called + >>> (5, 2) - Examples - -------- - Read block scalar data for 2D and 3D problems with 5 vertices: + Read vector data for a 3D system with 5 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2, 3, 4, 5] - >>> values = interface.read_block_scalar_data(mesh_name, data_name, vertex_ids) - >>> values.size - >>> 5 - + >>> values = read_data(mesh_name, data_name, vertex_ids) + >>> values.shape + >>> (5, 3) """ - check_array_like(vertex_ids, "vertex_ids", "read_block_scalar_data") + check_array_like(vertex_ids, "vertex_ids", "read_data") - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = _vertex_ids.size - cdef np.ndarray[double, ndim=1] _values = np.empty(size, dtype=np.double) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - if relative_read_time is None: - self.thisptr.readBlockScalarData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _values.data) + if len(vertex_ids) == 0: + size = 0 + dimensions = self.get_data_dimensions(mesh_name, data_name) + elif self.get_data_dimensions(mesh_name, data_name) == 1: + size = len(vertex_ids) + dimensions = 1 else: - self.thisptr.readBlockScalarData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, relative_read_time, _values.data) - - return _values - - def read_scalar_data (self, mesh_name, data_name, vertex_id, relative_read_time=None): - """ - Reads scalar data of a vertex. This function needs a value of a specified vertex from a dataID. - - Parameters - ---------- - mesh_name : str - Name of the mesh to read to. - data_name : str - ID to read from. - vertex_id : int - Index of the vertex. 
- relative_read_time : double - Point in time where data is read relative to the beginning of the current time step - - Returns - ------- - value : double - Contains the read value + size = len(vertex_ids) + dimensions = self.get_data_dimensions(mesh_name, data_name) - Notes - ----- - Previous calls: - initialize() has been called. + cdef vector[int] cpp_ids = vertex_ids + cdef vector[double] cpp_values = [-1 for _ in range(size * dimensions)] - Examples - -------- - Read scalar data for 2D and 3D problems: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> value = interface.read_scalar_data(mesh_name, data_name, vertex_id) - """ - cdef double _value + self.thisptr.readData (convert(mesh_name), convert(data_name), cpp_ids, relative_read_time, cpp_values) - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() + cdef np.ndarray[double, ndim=1] np_values = np.array(cpp_values, dtype=np.double) - if relative_read_time is None: - self.thisptr.readScalarData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, _value) + if len(vertex_ids) == 0: + return np_values.reshape((size)) + elif self.get_data_dimensions(mesh_name, data_name) == 1: + return np_values.reshape((size)) else: - self.thisptr.readScalarData ( mesh_name_py_bytes, data_name_py_bytes, vertex_id, relative_read_time, _value) + return np_values.reshape((size, dimensions)) - return _value - def write_block_vector_gradient_data (self, mesh_name, data_name, vertex_ids, gradientValues): + def write_gradient_data (self, mesh_name, data_name, vertex_ids, gradients): """ - Writes vector gradient data given as block. This function writes gradient values of specified vertices to a dataID. + Writes gradient data given as block. This function writes gradient values of specified vertices to a dataID. Values are provided as a block of continuous memory. 
Values are stored in a numpy array [N x D] where N = number of vertices and D = number of gradient components. @@ -1112,7 +914,7 @@ cdef class Interface: Data name to write to. vertex_ids : array_like Indices of the vertices. - gradientValues : array_like + gradients : array_like Gradient values differentiated in the spacial direction (dx, dy) for 2D space, (dx, dy, dz) for 3D space Notes @@ -1125,219 +927,40 @@ cdef class Interface: Examples -------- - Write block gradient vector data for a 2D problem with 2 vertices: + Write gradient vector data for a 2D problem with 2 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2] - >>> gradientValues = np.array([[v1x_dx, v1y_dx, v1x_dy, v1y_dy], [v2x_dx, v2y_dx, v2x_dy, v2y_dy]]) - >>> interface.write_block_vector_gradient_data(mesh_name, data_name, vertex_ids, gradientValues) + >>> gradients = np.array([[v1x_dx, v1y_dx, v1x_dy, v1y_dy], [v2x_dx, v2y_dx, v2x_dy, v2y_dy]]) + >>> participant.write_gradient_data(mesh_name, data_name, vertex_ids, gradients) - Write block vector data for a 3D (D=3) problem with 2 (N=2) vertices: + Write vector data for a 3D problem with 2 vertices: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" >>> vertex_ids = [1, 2] - >>> gradientValues = np.array([[v1x_dx, v1y_dx, v1z_dx, v1x_dy, v1y_dy, v1z_dy, v1x_dz, v1y_dz, v1z_dz], [v2x_dx, v2y_dx, v2z_dx, v2x_dy, v2y_dy, v2z_dy, v2x_dz, v2y_dz, v2z_dz]]) - >>> interface.write_block_vector_gradient_data(mesh_name, data_name, vertex_ids, gradientValues) + >>> gradients = np.array([[v1x_dx, v1y_dx, v1z_dx, v1x_dy, v1y_dy, v1z_dy, v1x_dz, v1y_dz, v1z_dz], [v2x_dx, v2y_dx, v2z_dx, v2x_dy, v2y_dy, v2z_dy, v2x_dz, v2y_dz, v2z_dz]]) + >>> participant.write_gradient_data(mesh_name, data_name, vertex_ids, gradients) """ - check_array_like(vertex_ids, "vertex_ids", "write_block_vector_gradient_data") - check_array_like(gradientValues, "gradientValues", "write_block_vector_gradient_data") + check_array_like(vertex_ids, 
"vertex_ids", "write_gradient_data") + check_array_like(gradients, "gradients", "write_gradient_data") - if not isinstance(gradientValues, np.ndarray): - gradientValues = np.asarray(gradientValues) + if not isinstance(gradients, np.ndarray): + gradients = np.asarray(gradients) - if len(gradientValues) > 0: - size, dimensions = gradientValues.shape - assert dimensions == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector data in write_block_vector_gradient_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions() * self.get_dimensions()) - if len(gradientValues) == 0: + if len(gradients) > 0: + size, dimensions = gradients.shape + assert dimensions == self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions(mesh_name, data_name), "Dimensions of vector data in write_gradient_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name)) + if len(gradients) == 0: size = 0 - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef vector[int] cpp_vertex_ids = vertex_ids + cdef vector[double] cpp_gradients = gradients.flatten() - assert _gradientValues.size == size * self.get_dimensions() * self.get_dimensions(), "Dimension of vector gradient data provided in write_block_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions() * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + assert cpp_gradients.size() == size * self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name), "Dimension of gradient data provided in write_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(cpp_gradients.size(), size * self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name)) + assert cpp_vertex_ids.size() == size, "Vertex IDs are of incorrect length in write_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(cpp_vertex_ids.size(), size) - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() + self.thisptr.writeGradientData (convert(mesh_name), convert(data_name), cpp_vertex_ids, cpp_gradients) - self.thisptr.writeBlockVectorGradientData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _gradientValues.data) - - def write_scalar_gradient_data (self, mesh_name, data_name, vertex_id, gradientValues): - """ - Writes scalar gradient data to a vertex - This function writes the corresponding gradient matrix value of a specified vertex to a dataID. - - The gradients need to be provided in the following format: - - The 2D-format of gradientValues is (v_dx, v_dy) vector corresponding to the data block v = (v) - differentiated respectively in x-direction dx and y-direction dy - - The 3D-format of gradientValues is (v_dx, v_dy, v_dz) vector - corresponding to the data block v = (v) differentiated respectively in spatial directions x-direction dx and y-direction dy and z-direction dz - - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. - data_name : str - ID to write to. - vertex_id : int - Index of the vertex. - gradientValue : array_like - A vector of the gradient values. 
- - Notes - ----- - Count of available elements at value matches the configured dimension - Vertex with dataID exists and contains data - Data with dataID has attribute hasGradient = true - - Previous calls: - initialize() has been called - - Examples - -------- - Write scalar data for a 2D problem: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> gradientValue = [v5_dx, v5_dy] - >>> interface.write_scalar_gradient_data(mesh_name, data_name, vertex_id, gradientValue) - """ - check_array_like(gradientValues, "gradientValues", "write_scalar_gradient_data") - - if not isinstance(gradientValues, np.ndarray): - gradientValues = np.asarray(gradientValues) - - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - - assert _gradientValues.size == self.get_dimensions(), "Vector data provided for vertex {} in write_scalar_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions()) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeScalarGradientData( mesh_name_py_bytes, data_name_py_bytes, vertex_id, _gradientValues.data) - - def write_vector_gradient_data (self, mesh_name, data_name, vertex_id, gradientValues): - """ - Writes vector gradient data to a vertex - This function writes the corresponding gradient matrix value of a specified vertex to a dataID. 
- - The gradients need to be provided in the following format: - - The 2D-format of \p gradientValues is (vx_dx, vy_dx, vx_dy, vy_dy) vector corresponding to the data block v = (vx, vy) - differentiated respectively in x-direction dx and y-direction dy - - The 3D-format of \p gradientValues is (vx_dx, vy_dx, vz_dx, vx_dy, vy_dy, vz_dy, vx_dz, vy_dz, vz_dz) vector - corresponding to the data block v = (vx, vy, vz) differentiated respectively in spatial directions x-direction dx and y-direction dy and z-direction dz - - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. - data_name : str - ID to write to. - vertex_id : int - Index of the vertex. - gradientValue : array_like - A vector of the gradient values. - - Notes - ----- - Count of available elements at value matches the configured dimension - Vertex with dataID exists and contains data - Data with dataID has attribute hasGradient = true - - Previous calls: - initialize() has been called - - Examples - -------- - Write scalar data for a 2D problem: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_id = 5 - >>> gradientValue = [v5x_dx, v5y_dx, v5x_dy,v5y_dy] - >>> interface.write_vector_gradient_data(mesh_name, data_name, vertex_id, gradientValue) - """ - - check_array_like(gradientValues, "gradientValues", "write_vector_gradient_data") - - if not isinstance(gradientValues, np.ndarray): - gradientValues = np.asarray(gradientValues) - - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - - assert _gradientValues.size == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector gradient data provided for vertex {} in write_vector_gradient_data does not match problem definition. Check length of input data provided. 
Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions() * self.get_dimensions()) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeVectorGradientData( mesh_name_py_bytes, data_name_py_bytes, vertex_id, _gradientValues.data) - - def write_block_scalar_gradient_data (self, mesh_name, data_name, vertex_ids, gradientValues): - """ - Writes scalar gradient data given as block. This function writes values of specified vertices to a dataID. - Values are provided as a block of continuous memory. Values are stored in a numpy array [N x D] where N = number - of vertices and D = dimensions of geometry. - - Parameters - ---------- - mesh_name : str - Name of the mesh to write to. - data_name : str - Data name to write to. - vertex_ids : array_like - Indices of the vertices. - gradientValues : array_like - Gradient values differentiated in the spacial direction (dx, dy) for 2D space, (dx, dy, dz) for 3D space - - Notes - ----- - Previous calls: - Count of available elements at values matches the configured dimension - Count of available elements at vertex_ids matches the given size - Initialize() has been called - Data with dataID has attribute hasGradient = true - - Examples - -------- - Write block gradient scalar data for a 2D problem with 2 vertices: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_ids = [1, 2] - >>> gradientValues = np.array([[v1_dx, v1_dy], [v2_dx, v2_dy]]) - >>> interface.write_block_scalar_gradient_data(mesh_name, data_name, vertex_ids, gradientValues) - - Write block scalar data for a 3D (D=3) problem with 2 (N=2) vertices: - >>> mesh_name = "MeshOne" - >>> data_name = "DataOne" - >>> vertex_ids = [1, 2] - >>> values = np.array([[v1_dx, v1_dy, v1x_dz], [v2_dx, v2_dy, v2_dz]]) - >>> interface.write_block_scalar_gradient_data(mesh_name, data_name, vertex_ids, values) - """ - check_array_like(vertex_ids, "vertex_ids", 
"write_block_scalar_gradient_data") - check_array_like(gradientValues, "gradientValues", "write_block_sclar_gradient_data") - - if not isinstance(gradientValues, np.ndarray): - gradientValues = np.asarray(gradientValues) - - if len(gradientValues) > 0: - size, dimensions = gradientValues.shape - assert dimensions == self.get_dimensions() , "Dimensions of scalar gradient data provided in write_block_scalar_gradient_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) - if len(gradientValues) == 0: - size = 0 - - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - - assert _gradientValues.size == size * self.get_dimensions(), "Scalar gradient data is not provided for all vertices in write_block_scalar_gradient_data. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - - self.thisptr.writeBlockScalarGradientData ( mesh_name_py_bytes, data_name_py_bytes, size, _vertex_ids.data, _gradientValues.data) def requires_gradient_data_for(self, mesh_name, data_name): """ @@ -1360,12 +983,11 @@ cdef class Interface: Check if gradient data is required for a data: >>> mesh_name = "MeshOne" >>> data_name = "DataOne" - >>> interface.is_gradient_data_required(mesh_name, data_name) + >>> participant.is_gradient_data_required(mesh_name, data_name) """ - cdef bytes mesh_name_py_bytes = mesh_name.encode() - cdef bytes data_name_py_bytes = data_name.encode() - return self.thisptr.requiresGradientDataFor( mesh_name_py_bytes, data_name_py_bytes) + return self.thisptr.requiresGradientDataFor(convert(mesh_name), convert(data_name)) + def set_mesh_access_region (self, mesh_name, bounding_box): """ @@ -1419,13 +1041,12 @@ cdef class Interface: assert len(bounding_box) > 0, "Bounding box cannot be empty." - assert len(bounding_box) == (self.get_dimensions() * 2), "Dimensions of bounding box in set_mesh_access_region does not match with dimensions in problem definition." + assert len(bounding_box) == (self.get_mesh_dimensions(mesh_name) * 2), "Dimensions of bounding box in set_mesh_access_region does not match with dimensions in problem definition." 
- cdef np.ndarray[double, ndim=1] _bounding_box = np.ascontiguousarray(bounding_box, dtype=np.double) + cdef vector[double] cpp_bounding_box = list(bounding_box) - cdef bytes mesh_name_py_bytes = mesh_name.encode() + self.thisptr.setMeshAccessRegion(convert(mesh_name), cpp_bounding_box) - self.thisptr.setMeshAccessRegion( mesh_name_py_bytes, _bounding_box.data) def get_mesh_vertices_and_ids (self, mesh_name): """ @@ -1447,15 +1068,17 @@ cdef class Interface: warnings.warn("The function get_mesh_vertices_and_ids is still experimental.") size = self.get_mesh_vertex_size(mesh_name) - cdef np.ndarray[int, ndim=1] _ids = np.empty(size, dtype=np.int32) - dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _coordinates = np.empty(size*dimensions, dtype=np.double) + dimensions = self.get_mesh_dimensions(mesh_name) + + cdef vector[int] cpp_ids = [-1 for _ in range(size)] + cdef vector[double] cpp_coordinates = [-1 for _ in range(size * dimensions)] - cdef bytes mesh_name_py_bytes = mesh_name.encode() + self.thisptr.getMeshVerticesAndIDs(convert(mesh_name), cpp_ids, cpp_coordinates) - self.thisptr.getMeshVerticesAndIDs( mesh_name_py_bytes, size, _ids.data, _coordinates.data) + cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] np_coordinates = np.array(cpp_coordinates, dtype=np.double) - return _ids, _coordinates.reshape((size, dimensions)) + return np_ids, np_coordinates.reshape((size, dimensions)) def get_version_information (): """ @@ -1463,4 +1086,4 @@ def get_version_information (): ------- Current preCICE version information """ - return SolverInterface.getVersionInformation() + return CppParticipant.getVersionInformation() diff --git a/examples/solverdummy/solverdummy.py b/examples/solverdummy/solverdummy.py index 3af107e9..c2ac95b2 100644 --- a/examples/solverdummy/solverdummy.py +++ b/examples/solverdummy/solverdummy.py @@ -34,42 +34,46 @@ solver_process_index = 0 solver_process_size = 1 
-interface = precice.Interface(participant_name, configuration_file_name, - solver_process_index, solver_process_size) +participant = precice.Participant(participant_name, configuration_file_name, + solver_process_index, solver_process_size) -assert (interface.requires_mesh_connectivity_for(mesh_name) is False) +assert (participant.requires_mesh_connectivity_for(mesh_name) is False) -dimensions = interface.get_dimensions() - -vertices = np.zeros((num_vertices, dimensions)) -read_data = np.zeros((num_vertices, dimensions)) -write_data = np.zeros((num_vertices, dimensions)) +vertices = np.zeros((num_vertices, participant.get_mesh_dimensions(mesh_name))) +read_data = np.zeros((num_vertices, participant.get_data_dimensions(mesh_name, read_data_name))) +write_data = np.zeros((num_vertices, participant.get_data_dimensions(mesh_name, write_data_name))) for x in range(num_vertices): - for y in range(0, dimensions): + for y in range(participant.get_mesh_dimensions(mesh_name)): vertices[x, y] = x + + for y in range(participant.get_data_dimensions(mesh_name, read_data_name)): read_data[x, y] = x + + for y in range(participant.get_data_dimensions(mesh_name, write_data_name)): write_data[x, y] = x -vertex_ids = interface.set_mesh_vertices(mesh_name, vertices) +vertex_ids = participant.set_mesh_vertices(mesh_name, vertices) -dt = interface.initialize() +participant.initialize() -while interface.is_coupling_ongoing(): - if interface.requires_writing_checkpoint(): +while participant.is_coupling_ongoing(): + if participant.requires_writing_checkpoint(): print("DUMMY: Writing iteration checkpoint") - read_data = interface.read_block_vector_data(mesh_name, read_data_name, vertex_ids) + dt = participant.get_max_time_step_size() + read_data = participant.read_data(mesh_name, read_data_name, vertex_ids, dt) write_data = read_data + 1 - interface.write_block_vector_data(mesh_name, write_data_name, vertex_ids, write_data) + participant.write_data(mesh_name, write_data_name, vertex_ids, 
write_data) print("DUMMY: Advancing in time") - dt = interface.advance(dt) + dt = participant.get_max_time_step_size() + participant.advance(dt) - if interface.requires_reading_checkpoint(): + if participant.requires_reading_checkpoint(): print("DUMMY: Reading iteration checkpoint") -interface.finalize() +participant.finalize() print("DUMMY: Closing python solver dummy...") diff --git a/precice/__init__.py b/precice/__init__.py index 45f24cb2..0022eb2f 100644 --- a/precice/__init__.py +++ b/precice/__init__.py @@ -2,14 +2,7 @@ __version__ = "unknown" import warnings -from cyprecice import Interface, get_version_information - - -def SolverInterface(*args): - """ - This is just a dummy function to avoid wrong usage of the interface. Please use precice.Interface, if you want to establish a connection to preCICE. See https://github.com/precice/python-bindings/issues/92 for more information. - """ - warnings.warn("please use precice.Interface to create the interface to C++ preCICE. Note that this function (precice.SolverInterface) does not do anything but throwing this warning. 
See https://github.com/precice/python-bindings/issues/92 for more information.") +from cyprecice import Participant, get_version_information __version__ = get_versions()['version'] diff --git a/setup.py b/setup.py index 165469d0..dd16072a 100644 --- a/setup.py +++ b/setup.py @@ -76,7 +76,7 @@ def get_extensions(is_test): link_args += pkgconfig.libs('libprecice').split() if is_test: bindings_sources.append(os.path.join(PYTHON_BINDINGS_PATH, "test", - "SolverInterface.cpp")) + "Participant.cpp")) return [ Extension( diff --git a/test/Participant.cpp b/test/Participant.cpp new file mode 100644 index 00000000..ea9b35dc --- /dev/null +++ b/test/Participant.cpp @@ -0,0 +1,338 @@ +#include "precice/Participant.hpp" +#include "precice/Tooling.hpp" +#include +#include +#include +#include + +std::string fake_version; +std::vector fake_read_write_buffer; +int fake_mesh_dimensions; +int fake_scalar_data_dimensions; +int fake_vector_data_dimensions; +std::vector fake_ids; +int n_fake_vertices; +std::string fake_mesh_name; +std::string fake_scalar_data_name; +std::string fake_vector_data_name; +int fake_data_id; +std::vector fake_bounding_box; +std::vector fake_coordinates; + +namespace precice { + +namespace impl{ +class ParticipantImpl{}; +} + +Participant:: Participant +( + precice::string_view participantName, + precice::string_view configurationFileName, + int solverProcessIndex, + int solverProcessSize ) +{ + fake_version = "dummy"; + fake_read_write_buffer = std::vector(); + fake_mesh_dimensions = 3; + fake_scalar_data_dimensions = 1; + fake_vector_data_dimensions = 3; + fake_data_id = 15; + fake_mesh_name = "FakeMesh"; + fake_scalar_data_name = "FakeScalarData"; + fake_vector_data_name = "FakeVectorData"; + n_fake_vertices = 3; + fake_ids.resize(n_fake_vertices); + std::iota(fake_ids.begin(), fake_ids.end(), 0); + fake_bounding_box.resize(fake_mesh_dimensions*2); + std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0); + 
fake_coordinates.resize(n_fake_vertices*fake_mesh_dimensions); + std::iota(fake_coordinates.begin(), fake_coordinates.end(), 0); +} + +Participant::Participant( + precice::string_view participantName, + precice::string_view configurationFileName, + int solverProcessIndex, + int solverProcessSize, + void * communicator) +{ + fake_version = "dummy"; + fake_read_write_buffer = std::vector(); + fake_mesh_dimensions = 3; + fake_scalar_data_dimensions = 1; + fake_vector_data_dimensions = 3; + fake_data_id = 15; + fake_mesh_name = "FakeMesh"; + fake_scalar_data_name = "FakeScalarData"; + fake_vector_data_name = "FakeVectorData"; + n_fake_vertices = 3; + fake_ids.resize(n_fake_vertices); + std::iota(fake_ids.begin(), fake_ids.end(), 0); + fake_bounding_box.resize(fake_mesh_dimensions*2); + std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0); +} + +Participant::~Participant() = default; + +void Participant:: initialize() +{ +} + +void Participant:: advance +( + double computedTimestepLength) +{ +} + +void Participant:: finalize() +{} + +int Participant:: getMeshDimensions +( + precice::string_view meshName) const +{ + return fake_mesh_dimensions; +} + +int Participant:: getDataDimensions +( + precice::string_view meshName, + precice::string_view dataName) const +{ + if (dataName.data() == fake_scalar_data_name) { + return fake_scalar_data_dimensions; + } else if (dataName.data() == fake_vector_data_name) { + return fake_vector_data_dimensions; + } else { + return -1; + } +} + +bool Participant:: isCouplingOngoing() const +{ + return 0; +} + +bool Participant:: isTimeWindowComplete() const +{ + return 0; +} + +double Participant:: getMaxTimeStepSize() const +{ + return -1.0; +} + +bool Participant:: requiresInitialData() +{ + return 0; +} + +bool Participant:: requiresReadingCheckpoint() +{ + return 0; +} + +bool Participant:: requiresWritingCheckpoint() +{ + return 0; +} + +bool Participant:: hasMesh +( + precice::string_view meshName ) const +{ + return 0; +} 
+ +bool Participant:: requiresMeshConnectivityFor +( + precice::string_view meshName) const +{ + return 0; +} + +bool Participant:: requiresGradientDataFor +( + precice::string_view meshName, + precice::string_view dataName) const +{ + return 0; +} + +bool Participant:: hasData +( + precice::string_view meshName, + precice::string_view dataName) const +{ + return 0; +} + +int Participant:: setMeshVertex +( + precice::string_view meshName, + precice::span position ) +{ + return 0; +} + +int Participant:: getMeshVertexSize +( + precice::string_view meshName) const +{ + return n_fake_vertices; +} + +void Participant:: setMeshVertices +( + precice::string_view meshName, + precice::span positions, + precice::span ids) +{ + if(ids.size() > 0) { + assert (ids.size() == fake_ids.size()); + std::copy(fake_ids.begin(), fake_ids.end(), ids.data()); + } +} + +void Participant:: setMeshEdge +( + precice::string_view meshName, + int firstVertexID, + int secondVertexID) +{} + +void Participant::setMeshEdges( + precice::string_view meshName, + precice::span vertices) +{} + +void Participant:: setMeshTriangle +( + precice::string_view meshName, + int firstVertexID, + int secondVertexID, + int thirdVertexID ) +{} + +void Participant:: setMeshTriangles +( + precice::string_view meshName, + precice::span vertices ) +{} + +void Participant:: setMeshQuad +( + precice::string_view meshName, + int firstVertexID, + int secondVertexID, + int thirdVertexID, + int fourthVertexID ) +{} + +void Participant:: setMeshQuads +( + precice::string_view meshName, + precice::span vertices) +{} + +void Participant::setMeshTetrahedron +( + precice::string_view meshName, + int firstVertexID, + int secondVertexID, + int thirdVertexID, + int fourthVertexID) +{} + +void Participant::setMeshTetrahedra +( + precice::string_view meshName, + precice::span vertices) +{} + +void Participant:: writeData +( + precice::string_view meshName, + precice::string_view dataName, + precice::span vertices, + precice::span 
values) +{ + fake_read_write_buffer.clear(); + + for(const double value: values) { + fake_read_write_buffer.push_back(value); + } +} + +void Participant:: readData +( + precice::string_view meshName, + precice::string_view dataName, + precice::span vertices, + double relativeReadTime, + precice::span values) const +{ + if (dataName.data() == fake_scalar_data_name) { + for(const int id: vertices) { + values[id] = fake_read_write_buffer[id]; + } + } else if (dataName.data() == fake_vector_data_name) { + for(const int id: vertices) { + for(int d = 0; d < fake_vector_data_dimensions; d++) { + const int linearized_id = fake_vector_data_dimensions * id + d; + values[linearized_id] = fake_read_write_buffer[linearized_id]; + } + } + } +} + +void Participant:: setMeshAccessRegion +( + precice::string_view meshName, + precice::span boundingBox ) const +{ + assert(meshName == fake_mesh_name); + + for(std::size_t i = 0; i < fake_bounding_box.size(); i++){ + assert(boundingBox[i] == fake_bounding_box[i]); + } +} + +void Participant:: getMeshVerticesAndIDs +( + precice::string_view meshName, + precice::span valueIndices, + precice::span coordinates ) const +{ + assert(meshName == fake_mesh_name); + assert(valueIndices.size() == fake_ids.size()); + assert(coordinates.size() == fake_coordinates.size()); + + for(std::size_t i = 0; i < fake_ids.size(); i++){ + valueIndices[i] = fake_ids[i]; + } + for(std::size_t i = 0; i < fake_coordinates.size(); i++){ + coordinates[i] = fake_coordinates[i]; + } +} + +void Participant::writeGradientData( + precice::string_view meshName, + precice::string_view dataName, + precice::span vertices, + precice::span gradients) +{ + fake_read_write_buffer.clear(); + for (const double gradient: gradients) { + fake_read_write_buffer.push_back(gradient); + } +} + +std::string getVersionInformation() +{ + return fake_version; +} + +} // namespace precice \ No newline at end of file diff --git a/test/SolverInterface.cpp b/test/SolverInterface.cpp deleted file 
mode 100644 index 270eaf29..00000000 --- a/test/SolverInterface.cpp +++ /dev/null @@ -1,465 +0,0 @@ -#include "precice/SolverInterface.hpp" -#include "precice/Tooling.hpp" -#include -#include -#include -#include -#include - -std::string fake_version; -std::vector fake_read_write_buffer; -int fake_dimensions; -std::vector fake_ids; -int n_fake_vertices; -std::string fake_mesh_name; -std::string fake_data_name; -int fake_data_id; -std::vector fake_bounding_box; -std::vector fake_coordinates; - -namespace precice { - -namespace impl{ -class SolverInterfaceImpl{}; -} - -SolverInterface:: SolverInterface -( - std::string_view participantName, - std::string_view configurationFileName, - int solverProcessIndex, - int solverProcessSize ) -{ - fake_version = "dummy"; - fake_read_write_buffer = std::vector(); - fake_dimensions = 3; - fake_data_id = 15; - fake_mesh_name = "FakeMesh"; - fake_data_name = "FakeData"; - n_fake_vertices = 3; - fake_ids.resize(n_fake_vertices); - std::iota(fake_ids.begin(), fake_ids.end(), 0); - fake_bounding_box.resize(fake_dimensions*2); - std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0); - fake_coordinates.resize(n_fake_vertices*fake_dimensions); - std::iota(fake_coordinates.begin(), fake_coordinates.end(), 0); -} - -SolverInterface::SolverInterface( - std::string_view participantName, - std::string_view configurationFileName, - int solverProcessIndex, - int solverProcessSize, - void * communicator) -{ - fake_version = "dummy"; - fake_read_write_buffer = std::vector(); - fake_dimensions = 3; - fake_data_id = 15; - fake_mesh_name = "FakeMesh"; - fake_data_name = "FakeData"; - n_fake_vertices = 3; - fake_ids.resize(n_fake_vertices); - std::iota(fake_ids.begin(), fake_ids.end(), 0); - fake_bounding_box.resize(fake_dimensions*2); - std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0); -} - -SolverInterface::~SolverInterface() = default; - -double SolverInterface:: initialize() -{ - return -1; -} - -double 
SolverInterface:: advance -( - double computedTimestepLength ) -{ - return -1; -} - -void SolverInterface:: finalize() -{} - -int SolverInterface:: getDimensions() const -{ - return fake_dimensions; -} - -bool SolverInterface:: isCouplingOngoing() const -{ - return 0; -} - -bool SolverInterface:: isTimeWindowComplete() const -{ - return 0; -} - -bool SolverInterface:: requiresInitialData() -{ - return 0; -} - -bool SolverInterface:: requiresReadingCheckpoint() -{ - return 0; -} - -bool SolverInterface:: requiresWritingCheckpoint() -{ - return 0; -} - -bool SolverInterface:: hasMesh -( - std::string_view meshName ) const -{ - return 0; -} - -bool SolverInterface:: requiresMeshConnectivityFor -( - std::string_view meshName) const -{ - return 0; -} - -bool SolverInterface::requiresGradientDataFor -( - std::string_view meshName, - std::string_view dataName) const -{ - return 0; -} - -bool SolverInterface:: hasData -( - std::string_view dataName, - std::string_view meshName) const -{ - return 0; -} - -int SolverInterface:: setMeshVertex -( - std::string_view meshName, - const double* position ) -{ - return 0; -} - -int SolverInterface:: getMeshVertexSize -( - std::string_view meshName) const -{ - return n_fake_vertices; -} - -void SolverInterface:: setMeshVertices -( - std::string_view meshName, - int size, - const double* positions, - int* ids ) -{ - assert (size == fake_ids.size()); - std::copy(fake_ids.begin(), fake_ids.end(), ids); -} - -void SolverInterface:: setMeshEdge -( - std::string_view meshName, - int firstVertexID, - int secondVertexID ) -{} - -void SolverInterface::setMeshEdges( - std::string_view meshName, - int size, - const int * vertices) -{} - -void SolverInterface:: setMeshTriangle -( - std::string_view meshName, - int firstVertexID, - int secondVertexID, - int thirdVertexID ) -{} - -void SolverInterface:: setMeshTriangles -( - std::string_view meshName, - int size, - const int * vertices ) -{} - -void SolverInterface:: setMeshQuad -( - 
std::string_view meshName, - int firstVertexID, - int secondVertexID, - int thirdVertexID, - int fourthVertexID ) -{} - -void SolverInterface:: setMeshQuads -( - std::string_view meshName, - int size, - const int * vertices) -{} - -void SolverInterface::setMeshTetrahedron( - std::string_view meshName, - int firstVertexID, - int secondVertexID, - int thirdVertexID, - int fourthVertexID) -{} - -void SolverInterface::setMeshTetrahedra( - std::string_view meshName, - int size, - const int * vertices) -{} - -void SolverInterface:: writeBlockVectorData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - const double* values ) -{ - fake_read_write_buffer.clear(); - for(int i = 0; i < size * this->getDimensions(); i++){ - fake_read_write_buffer.push_back(values[i]); - } -} - -void SolverInterface:: writeVectorData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - const double* value ) -{ - fake_read_write_buffer.clear(); - for(int i = 0; i < this->getDimensions(); i++){ - fake_read_write_buffer.push_back(value[i]); - } -} - -void SolverInterface:: writeBlockScalarData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - const double* values ) -{ - fake_read_write_buffer.clear(); - for(int i = 0; i < size; i++){ - fake_read_write_buffer.push_back(values[i]); - } -} - -void SolverInterface:: writeScalarData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - double value ) -{ - fake_read_write_buffer.clear(); - fake_read_write_buffer.push_back(value); -} - -void SolverInterface::writeBlockVectorGradientData( - std::string_view meshName, - std::string_view dataName, - int size, - const int *valueIndices, - const double *gradientValues) -{ - fake_read_write_buffer.clear(); - for (int i = 0; i < size * this->getDimensions() * this->getDimensions(); i++) { - fake_read_write_buffer.push_back(gradientValues[i]); - } -} - 
-void SolverInterface::writeScalarGradientData( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - const double *gradientValues) -{ - fake_read_write_buffer.clear(); - for (int i = 0; i < this->getDimensions(); i++) { - fake_read_write_buffer.push_back(gradientValues[i]); - } -} -void SolverInterface::writeBlockScalarGradientData( - std::string_view meshName, - std::string_view dataName, - int size, - const int *valueIndices, - const double *gradientValues) -{ - fake_read_write_buffer.clear(); - for (int i = 0; i < size * this->getDimensions(); i++) { - fake_read_write_buffer.push_back(gradientValues[i]); - } -} - -void SolverInterface::writeVectorGradientData( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - const double *gradientValues) -{ - fake_read_write_buffer.clear(); - for (int i = 0; i < this->getDimensions() * this->getDimensions(); i++) { - fake_read_write_buffer.push_back(gradientValues[i]); - } -} - -void SolverInterface:: readBlockVectorData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - double* values ) const -{ - for(int i = 0; i < size * this->getDimensions(); i++){ - values[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readBlockVectorData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - double relativeReadTime, - double* values ) const -{ - for(int i = 0; i < size * this->getDimensions(); i++){ - values[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readVectorData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - double* value ) const -{ - for(int i = 0; i < this->getDimensions(); i++){ - value[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readVectorData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - double relativeReadTime, - double* value ) const -{ - for(int i = 0; 
i < this->getDimensions(); i++){ - value[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readBlockScalarData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - double* values ) const -{ - for(int i = 0; i < size; i++){ - values[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readBlockScalarData -( - std::string_view meshName, - std::string_view dataName, - int size, - const int* valueIndices, - double relativeReadTime, - double* values ) const -{ - for(int i = 0; i < size; i++){ - values[i] = fake_read_write_buffer[i]; - } -} - -void SolverInterface:: readScalarData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - double& value ) const -{ - value = fake_read_write_buffer[0]; -} - -void SolverInterface:: readScalarData -( - std::string_view meshName, - std::string_view dataName, - int valueIndex, - double relativeReadTime, - double& value ) const -{ - value = fake_read_write_buffer[0]; -} - -void SolverInterface:: setMeshAccessRegion -( - std::string_view meshName, - const double* boundingBox ) const -{ - assert(meshName == fake_mesh_name); - - for(std::size_t i = 0; i < fake_bounding_box.size(); i++){ - assert(boundingBox[i] == fake_bounding_box[i]); - } -} - -void SolverInterface:: getMeshVerticesAndIDs -( - std::string_view meshName, - const int size, - int* valueIndices, - double* coordinates ) const -{ - assert(meshName == fake_mesh_name); - assert(size == fake_ids.size()); - - for(std::size_t i = 0; i < fake_ids.size(); i++){ - valueIndices[i] = fake_ids[i]; - } - for(std::size_t i = 0; i < fake_coordinates.size(); i++){ - coordinates[i] = fake_coordinates[i]; - } -} - -std::string getVersionInformation() -{ - return fake_version; -} - -} // namespace precice \ No newline at end of file diff --git a/test/test_bindings_module.py b/test/test_bindings_module.py index c1de6cdc..e7c18d5e 100644 --- a/test/test_bindings_module.py +++ 
b/test/test_bindings_module.py @@ -10,54 +10,61 @@ class TestBindings(TestCase): """ def test_constructor(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) self.assertTrue(True) def test_constructor_custom_mpi_comm(self): - solver_interface = precice.Interface( + participant = precice.Participant( "test", "dummy.xml", 0, 1, MPI.COMM_WORLD) self.assertTrue(True) def test_version(self): precice.__version__ - def test_get_dimensions(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + def test_get_mesh_dimensions(self): + participant = precice.Participant("test", "dummy.xml", 0, 1) # TODO: it would be nice to be able to mock the output of the interface # directly in the test, not in test/SolverInterface.hpp - fake_dimension = 3 # compare to test/SolverInterface.hpp, fake_dimensions + fake_mesh_dimension = 3 # compare to test/SolverInterface.hpp, fake_mesh_dimension # TODO: it would be nice to be able to mock the output of the interface # directly in the test, not in test/SolverInterface.hpp - self.assertEqual(fake_dimension, solver_interface.get_dimensions()) + self.assertEqual(fake_mesh_dimension, participant.get_mesh_dimensions("dummy")) + + def test_get_data_dimensions(self): + participant = precice.Participant("test", "dummy.xml", 0, 1) + fake_scalar_data_dimension = 1 # compare to test/SolverInterface.hpp, fake_scalar_data_dimension + self.assertEqual(fake_scalar_data_dimension, participant.get_data_dimensions("dummy", "FakeScalarData")) + fake_vector_data_dimension = 3 # compare to test/SolverInterface.hpp, fake_vector_data_dimension + self.assertEqual(fake_vector_data_dimension, participant.get_data_dimensions("dummy", "FakeVectorData")) def test_requires_mesh_connectivity_for(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_bool = 0 # compare to output in 
test/SolverInterface.cpp fake_mesh_name = "FakeMesh" - self.assertEqual(fake_bool, solver_interface.requires_mesh_connectivity_for(fake_mesh_name)) + self.assertEqual(fake_bool, participant.requires_mesh_connectivity_for(fake_mesh_name)) def test_set_mesh_vertices(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices positions = np.random.rand(n_fake_vertices, fake_dimension) expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertices_empty(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions n_fake_vertices = 0 # compare to test/SolverInterface.cpp, n_fake_vertices - positions = np.random.rand(n_fake_vertices, fake_dimension) + positions = np.zeros((n_fake_vertices, fake_dimension)) expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertices_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, 
fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices @@ -65,20 +72,20 @@ def test_set_mesh_vertices_list(self): positions = list(list(positions[i, j] for j in range( positions.shape[1])) for i in range(positions.shape[0])) expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertices_empty_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name positions = [] n_fake_vertices = 0 expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertices_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices @@ -86,20 +93,20 @@ def test_set_mesh_vertices_tuple(self): positions = tuple(tuple(positions[i, j] for j in range( positions.shape[1])) for i in range(positions.shape[0])) expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, 
actual_output)) def test_set_mesh_vertices_empty_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name positions = () n_fake_vertices = 0 expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertices_mixed(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices @@ -107,82 +114,84 @@ def test_set_mesh_vertices_mixed(self): positions = list(tuple(positions[i, j] for j in range( positions.shape[1])) for i in range(positions.shape[0])) expected_output = np.array(range(n_fake_vertices)) - actual_output = solver_interface.set_mesh_vertices(fake_mesh_name, positions) + actual_output = participant.set_mesh_vertices(fake_mesh_name, positions) self.assertTrue(np.array_equal(expected_output, actual_output)) def test_set_mesh_vertex(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions position = np.random.rand(fake_dimension) - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) self.assertTrue(0 == vertex_id) def test_set_mesh_vertex_empty(self): - 
solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 0 # compare to test/SolverInterface.cpp, fake_dimensions position = np.random.rand(fake_dimension) - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) self.assertTrue(0 == vertex_id) def test_set_mesh_vertex_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions position = list(np.random.rand(fake_dimension)) - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) self.assertTrue(0 == vertex_id) def test_set_mesh_vertex_empty_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name position = [] - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) self.assertTrue(0 == vertex_id) def test_set_mesh_vertex_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions position = tuple(np.random.rand(fake_dimension)) - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) 
self.assertTrue(0 == vertex_id) def test_set_mesh_vertex_empty_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name position = () - vertex_id = solver_interface.set_mesh_vertex(fake_mesh_name, position) + vertex_id = participant.set_mesh_vertex(fake_mesh_name, position) self.assertTrue(0 == vertex_id) def test_get_mesh_vertex_size(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices - n_vertices = solver_interface.get_mesh_vertex_size(fake_mesh_name) + n_vertices = participant.get_mesh_vertex_size(fake_mesh_name) self.assertTrue(n_fake_vertices == n_vertices) def test_read_write_block_scalar_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([3, 7, 8], dtype=np.double) - solver_interface.write_block_scalar_data("FakeMesh", "FakeData", np.array([1, 2, 3]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array([1, 2, 3])) + participant.write_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_scalar_data_single_float(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = 8 with self.assertRaises(TypeError): - solver_interface.write_block_scalar_data("FakeMesh", "FakeData", 1, write_data) + participant.write_data("FakeMesh", 
"FakeScalarData", 1, write_data) with self.assertRaises(TypeError): - solver_interface.read_block_scalar_data("FakeMesh", "FakeData", 1) + participant.read_data("FakeMesh", "FakeScalarData", 1) def test_read_write_block_scalar_data_empty(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([]) - solver_interface.write_block_scalar_data("FakeMesh", "FakeData", [], write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", []) + participant.write_data("FakeMesh", "FakeScalarData", [], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", [], dt) self.assertTrue(len(read_data) == 0) def test_read_write_block_scalar_data_non_contiguous(self): @@ -191,55 +200,62 @@ def test_read_write_block_scalar_data_non_contiguous(self): Note: Check whether np.ndarray is contiguous via np.ndarray.flags. """ - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) dummy_array = np.random.rand(3, 3) write_data = dummy_array[:, 1] assert (write_data.flags["C_CONTIGUOUS"] is False) - solver_interface.write_block_scalar_data("FakeMesh", "FakeData", np.array([1, 2, 3]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array([1, 2, 3])) + participant.write_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_scalar_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = 3 - solver_interface.write_scalar_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_scalar_data("FakeMesh", "FakeData", 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) + 
write_data = [3] + participant.write_data("FakeMesh", "FakeScalarData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", [0], dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_vector_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([[3, 7, 8], [7, 6, 5]], dtype=np.double) - solver_interface.write_block_vector_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", np.array([1, 2])) + participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_vector_data_empty(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([]) - solver_interface.write_block_vector_data("FakeMesh", "FakeData", [], write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", []) + participant.write_data("FakeMesh", "FakeVectorData", [], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [], dt) self.assertTrue(len(read_data) == 0) def test_read_write_block_vector_data_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = [[3, 7, 8], [7, 6, 5]] - solver_interface.write_block_vector_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", np.array([1, 2])) + participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = 
participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_vector_data_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = ((3, 7, 8), (7, 6, 5)) - solver_interface.write_block_vector_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", np.array([1, 2])) + participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_vector_data_mixed(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = [(3, 7, 8), (7, 6, 5)] - solver_interface.write_block_vector_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", np.array([1, 2])) + participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_block_vector_data_non_contiguous(self): @@ -248,35 +264,39 @@ def test_read_write_block_vector_data_non_contiguous(self): Note: Check whether np.ndarray is contiguous via np.ndarray.flags. 
""" - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) size = 6 dummy_array = np.random.rand(size, 5) write_data = dummy_array[:, 1:4] assert (write_data.flags["C_CONTIGUOUS"] is False) vertex_ids = np.arange(size) - solver_interface.write_block_vector_data("FakeMesh", "FakeData", vertex_ids, write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", vertex_ids) + participant.write_data("FakeMesh", "FakeVectorData", vertex_ids, write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", vertex_ids, dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_vector_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = np.array([1, 2, 3], dtype=np.double) - solver_interface.write_vector_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_vector_data("FakeMesh", "FakeData", 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = np.array([[0, 1, 2]], dtype=np.double) + participant.write_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_vector_data_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = [1, 2, 3] - solver_interface.write_vector_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_vector_data("FakeMesh", "FakeData", 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = [[0, 1, 2]] + participant.write_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_vector_data_tuple(self): - solver_interface = 
precice.Interface("test", "dummy.xml", 0, 1) - write_data = (1, 2, 3) - solver_interface.write_vector_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_vector_data("FakeMesh", "FakeData", 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = [(1, 2, 3)] + participant.write_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_read_write_vector_data_non_contiguous(self): @@ -285,12 +305,14 @@ def test_read_write_vector_data_non_contiguous(self): Note: Check whether np.ndarray is contiguous via np.ndarray.flags. """ - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) dummy_array = np.random.rand(3, 3) write_data = dummy_array[:, 1] assert (write_data.flags["C_CONTIGUOUS"] is False) - solver_interface.write_vector_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_vector_data("FakeMesh", "FakeData", 1) + write_data = [write_data] + participant.write_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt) self.assertTrue(np.array_equal(write_data, read_data)) def test_get_version_information(self): @@ -299,14 +321,14 @@ def test_get_version_information(self): self.assertEqual(version_info, fake_version_info) def test_set_mesh_access_region(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions fake_bounding_box = np.arange(fake_dimension * 2) - solver_interface.set_mesh_access_region(fake_mesh_name, fake_bounding_box) + 
participant.set_mesh_access_region(fake_mesh_name, fake_bounding_box) def test_get_mesh_vertices_and_ids(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions @@ -316,148 +338,162 @@ def test_get_mesh_vertices_and_ids(self): coordinates[i, 0] = i * fake_dimension coordinates[i, 1] = i * fake_dimension + 1 coordinates[i, 2] = i * fake_dimension + 2 - fake_ids, fake_coordinates = solver_interface.get_mesh_vertices_and_ids(fake_mesh_name) + fake_ids, fake_coordinates = participant.get_mesh_vertices_and_ids(fake_mesh_name) self.assertTrue(np.array_equal(fake_ids, vertex_ids)) self.assertTrue(np.array_equal(fake_coordinates, coordinates)) def test_requires_gradient_data_for(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_bool = 0 # compare to output in test/SolverInterface.cpp fake_mesh_name = "FakeMesh" fake_data_name = "FakeName" - self.assertEqual(fake_bool, solver_interface.requires_gradient_data_for(fake_mesh_name, fake_data_name)) + self.assertEqual(fake_bool, participant.requires_gradient_data_for(fake_mesh_name, fake_data_name)) def test_write_block_scalar_gradient_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = np.array([[1, 2, 3], [6, 7, 8], [9, 10, 11]], dtype=np.double) - solver_interface.write_block_scalar_gradient_data("FakeMesh", "FakeData", np.array([1, 2, 3]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9))) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = np.array([[0, 1, 2], [6, 7, 8], [9, 10, 11]], dtype=np.double) + 
participant.write_gradient_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array(range(9)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_block_scalar_gradient_data_single_float(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) fake_dimension = 3 - n_fake_vertices = 4 + n_fake_vertices = 1 vertex_ids = np.arange(n_fake_vertices) write_data = np.random.rand(n_fake_vertices, fake_dimension) - solver_interface.write_block_scalar_gradient_data("FakeMesh", "FakeData", vertex_ids, write_data) - read_data = solver_interface.read_block_vector_data("FakeMesh", "FakeData", vertex_ids) - self.assertTrue(np.array_equal(write_data, read_data)) + participant.write_gradient_data("FakeMesh", "FakeScalarData", vertex_ids, write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", np.arange(n_fake_vertices * fake_dimension), dt) + self.assertTrue(np.array_equal(write_data.flatten(), read_data)) def test_write_block_scalar_gradient_data_empty(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([]) - solver_interface.write_block_scalar_gradient_data("FakeMesh", "FakeData", [], write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", []) + participant.write_gradient_data("FakeMesh", "FakeScalarData", [], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeScalarData", [], dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_block_scalar_gradient_data_non_contiguous(self): """ - Tests behaviour of solver interface, if a non contiguous array is passed to the interface. 
-
+        Tests behavior of solver interface, if a non contiguous array is passed to the interface.
         Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
         """
-        solver_interface = precice.Interface("test", "dummy.xml", 0, 1)
+        participant = precice.Participant("test", "dummy.xml", 0, 1)
         dummy_array = np.random.rand(3, 9)
         write_data = dummy_array[:, 3:6]
         assert write_data.flags["C_CONTIGUOUS"] is False
-        solver_interface.write_block_scalar_gradient_data("FakeMesh", "FakeData", np.array([1, 2, 3]), write_data)
-        read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9)))
+        participant.write_gradient_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data)
+        dt = 1
+        read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array(range(9)), dt)
         self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
 
     def test_write_scalar_gradient_data(self):
-        solver_interface = precice.Interface("test", "dummy.xml", 0, 1)
+        participant = precice.Participant("test", "dummy.xml", 0, 1)
         fake_dimension = 3
-        write_data = np.random.rand(fake_dimension)
-        solver_interface.write_scalar_gradient_data("FakeMesh", "FakeData", 1, write_data)
-        read_data = solver_interface.read_vector_data("FakeMesh", "FakeData", 1)
+        write_data = [np.random.rand(fake_dimension)]
+        participant.write_gradient_data("FakeMesh", "FakeScalarData", [0], write_data)
+        dt = 1
+        # Gradient data is essentially vector data, hence the appropriate data name is used here
+        read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
         self.assertTrue(np.array_equiv(write_data, read_data))
 
     def test_write_block_vector_gradient_data(self):
-        solver_interface = precice.Interface("test", "dummy.xml", 0, 1)
+        participant = precice.Participant("test", "dummy.xml", 0, 1)
         fake_dimension = 3
         n_fake_vertices = 4
         vertex_ids = np.arange(n_fake_vertices)
         write_data = np.random.rand(n_fake_vertices, fake_dimension * fake_dimension)
-
solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", vertex_ids, write_data) - read_data = solver_interface.read_block_vector_data( - "FakeMesh", "FakeData", np.array(range(n_fake_vertices * fake_dimension))) + participant.write_gradient_data("FakeMesh", "FakeVectorData", vertex_ids, write_data) + dt = 1 + read_data = participant.read_data( + "FakeMesh", "FakeVectorData", np.array(range(n_fake_vertices * fake_dimension)), dt) self.assertTrue(np.array_equiv(write_data.flatten(), read_data.flatten())) def test_write_block_vector_gradient_data_empty(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = np.array([]) - solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", [], write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", []) + participant.write_gradient_data("FakeMesh", "FakeVectorData", [], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", [], dt) self.assertTrue(len(read_data) == 0) def test_write_block_vector_gradient_data_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = [[3.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0]] - solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(18))) + participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_block_vector_gradient_data_tuple(self): - solver_interface = precice.Interface("test", 
"dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = ((1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 3.0, 7.0, 8.0), (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0)) - solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(18))) + participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_block_vector_gradient_data_mixed(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) write_data = [(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 3.0, 7.0, 8.0), (4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 7.0, 6.0, 5.0)] - solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", np.array([1, 2]), write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(18))) + participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_block_vector_gradient_data_non_contiguous(self): """ - Tests behaviour of solver interface, if a non contiguous array is passed to the interface. - + Tests behavior of solver interface, if a non contiguous array is passed to the interface. Note: Check whether np.ndarray is contiguous via np.ndarray.flags. 
""" - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) dummy_array = np.random.rand(3, 15) write_data = dummy_array[:, 2:11] assert write_data.flags["C_CONTIGUOUS"] is False vertex_ids = np.arange(3) - solver_interface.write_block_vector_gradient_data("FakeMesh", "FakeData", vertex_ids, write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(27))) + participant.write_gradient_data("FakeMesh", "FakeVectorData", vertex_ids, write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(9)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_vector_gradient_data(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = np.arange(0, 9, dtype=np.double) - solver_interface.write_vector_gradient_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9))) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = [np.arange(0, 9, dtype=np.double)] + participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_vector_gradient_data_list(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] - solver_interface.write_vector_gradient_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9))) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]] + participant.write_gradient_data("FakeMesh", 
"FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_vector_gradient_data_tuple(self): - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) - write_data = (1.0, 2.0, 3.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0) - solver_interface.write_vector_gradient_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9))) + participant = precice.Participant("test", "dummy.xml", 0, 1) + write_data = [(1.0, 2.0, 3.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0)] + participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) def test_write_vector_gradient_data_non_contiguous(self): """ - Tests behaviour of solver interface, if a non contiguous array is passed to the interface. - + Tests behavior of solver interface, if a non contiguous array is passed to the interface. Note: Check whether np.ndarray is contiguous via np.ndarray.flags. 
""" - solver_interface = precice.Interface("test", "dummy.xml", 0, 1) + participant = precice.Participant("test", "dummy.xml", 0, 1) dummy_array = np.random.rand(9, 3) write_data = dummy_array[:, 1] assert write_data.flags["C_CONTIGUOUS"] is False - solver_interface.write_vector_gradient_data("FakeMesh", "FakeData", 1, write_data) - read_data = solver_interface.read_block_scalar_data("FakeMesh", "FakeData", np.array(range(9))) + write_data = [write_data] + participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data) + dt = 1 + read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt) self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten())) From e0012bac4ba76e42c077750eacab19616fab9d6f Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Wed, 21 Jun 2023 10:27:46 +0200 Subject: [PATCH 08/33] Minor renaming. --- README.md | 14 ++++++++++++++ test/test_bindings_module.py | 10 +++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index dbfddff0..cbe6d362 100644 --- a/README.md +++ b/README.md @@ -165,6 +165,20 @@ You can find the documentation of the implemented interface in the file `precice The following error shows up during installation, if preCICE is not found: +```bash + /tmp/pip-install-d_fjyo1h/pyprecice/precice.cpp:643:10: fatal error: precice/Participant.hpp: No such file or directory + 643 | #include "precice/Participant.hpp" + | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + compilation terminated. 
+ error: command 'x86_64-linux-gnu-gcc' failed with exit status 1 + ---------------------------------------- + ERROR: Failed building wheel for pyprecice +Failed to build pyprecice +ERROR: Could not build wheels for pyprecice which use PEP 517 and cannot be installed directly +``` + +Or, for preCICE v2: + ```bash /tmp/pip-install-d_fjyo1h/pyprecice/precice.cpp:643:10: fatal error: precice/SolverInterface.hpp: No such file or directory 643 | #include "precice/SolverInterface.hpp" diff --git a/test/test_bindings_module.py b/test/test_bindings_module.py index e7c18d5e..6d8ae041 100644 --- a/test/test_bindings_module.py +++ b/test/test_bindings_module.py @@ -24,17 +24,17 @@ def test_version(self): def test_get_mesh_dimensions(self): participant = precice.Participant("test", "dummy.xml", 0, 1) # TODO: it would be nice to be able to mock the output of the interface - # directly in the test, not in test/SolverInterface.hpp - fake_mesh_dimension = 3 # compare to test/SolverInterface.hpp, fake_mesh_dimension + # directly in the test, not in test/Participant.hpp + fake_mesh_dimension = 3 # compare to test/Participant.hpp, fake_mesh_dimension # TODO: it would be nice to be able to mock the output of the interface - # directly in the test, not in test/SolverInterface.hpp + # directly in the test, not in test/Participant.hpp self.assertEqual(fake_mesh_dimension, participant.get_mesh_dimensions("dummy")) def test_get_data_dimensions(self): participant = precice.Participant("test", "dummy.xml", 0, 1) - fake_scalar_data_dimension = 1 # compare to test/SolverInterface.hpp, fake_scalar_data_dimension + fake_scalar_data_dimension = 1 # compare to test/Participant.hpp, fake_scalar_data_dimension self.assertEqual(fake_scalar_data_dimension, participant.get_data_dimensions("dummy", "FakeScalarData")) - fake_vector_data_dimension = 3 # compare to test/SolverInterface.hpp, fake_vector_data_dimension + fake_vector_data_dimension = 3 # compare to test/Participant.hpp, 
fake_vector_data_dimension self.assertEqual(fake_vector_data_dimension, participant.get_data_dimensions("dummy", "FakeVectorData")) def test_requires_mesh_connectivity_for(self): From b6a481499461990637ec3838f5b5851cac1bd6c0 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Wed, 21 Jun 2023 10:31:54 +0200 Subject: [PATCH 09/33] Bump version in changelog. --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a89de7e..fe947a8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,9 @@ All notable changes to this project will be documented in this file. -## latest +## 3.0.0.0dev0 + +* Update to API introduced in preCICE v3.0.0. https://github.com/precice/python-bindings/pull/169 https://github.com/precice/python-bindings/pull/174 https://github.com/precice/python-bindings/pull/179 ## 2.5.0.2 From b97364c857f676312365f8d31e82d3f2fadd6747 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 25 Jul 2023 10:54:57 +0200 Subject: [PATCH 10/33] Removing underscores from all variables defined with cdef --- cyprecice/cyprecice.pyx | 130 ++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 9a6c3a70..575435f1 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -460,8 +460,8 @@ cdef class Interface: elif len(position) == 0: dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _position = np.ascontiguousarray(position, dtype=np.double) - vertex_id = self.thisptr.setMeshVertex(mesh_id, _position.data) + cdef np.ndarray[double, ndim=1] position_np = np.ascontiguousarray(position, dtype=np.double) + vertex_id = self.thisptr.setMeshVertex(mesh_id, position_np.data) return vertex_id def get_mesh_vertex_size (self, mesh_id): @@ -538,9 +538,9 @@ cdef class Interface: size = positions.shape[0] dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _positions = 
np.ascontiguousarray(positions.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] positions_np = np.ascontiguousarray(positions.flatten(), dtype=np.double) cdef np.ndarray[int, ndim=1] vertex_ids = np.empty(size, dtype=np.int32) - self.thisptr.setMeshVertices (mesh_id, size, _positions.data, vertex_ids.data) + self.thisptr.setMeshVertices (mesh_id, size, positions_np.data, vertex_ids.data) return vertex_ids def get_mesh_vertices(self, mesh_id, vertex_ids): @@ -584,11 +584,11 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "get_mesh_vertices") - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = _vertex_ids.size - cdef np.ndarray[double, ndim=1] _positions = np.empty(size * self.get_dimensions(), dtype=np.double) - self.thisptr.getMeshVertices (mesh_id, size, _vertex_ids.data, _positions.data) - return _positions.reshape((size, self.get_dimensions())) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = vertex_ids_np.size + cdef np.ndarray[double, ndim=1] positions_np = np.empty(size * self.get_dimensions(), dtype=np.double) + self.thisptr.getMeshVertices (mesh_id, size, vertex_ids_np.data, positions_np.data) + return positions_np.reshape((size, self.get_dimensions())) def get_mesh_vertex_ids_from_positions (self, mesh_id, positions): """ @@ -648,9 +648,9 @@ cdef class Interface: size = positions.shape[0] dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _positions = np.ascontiguousarray(positions.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] positions_np = np.ascontiguousarray(positions.flatten(), dtype=np.double) cdef np.ndarray[int, ndim=1] vertex_ids = np.empty(int(size), dtype=np.int32) - self.thisptr.getMeshVertexIDsFromPositions (mesh_id, size, _positions.data, vertex_ids.data) + self.thisptr.getMeshVertexIDsFromPositions (mesh_id, size, positions_np.data, vertex_ids.data) return vertex_ids 
def set_mesh_edge (self, mesh_id, first_vertex_id, second_vertex_id): @@ -916,13 +916,13 @@ cdef class Interface: if len(values) == 0: size = 0 - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] values_np = np.ascontiguousarray(values.flatten(), dtype=np.double) - assert _values.size == size * self.get_dimensions(), "Vector data is not provided for all vertices in write_block_vector_data. Check length of input data provided. Provided size: {}, expected size: {}".format(_values.size, size * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + assert values_np.size == size * self.get_dimensions(), "Vector data is not provided for all vertices in write_block_vector_data. Check length of input data provided. Provided size: {}, expected size: {}".format(values_np.size, size * self.get_dimensions()) + assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_vector_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) - self.thisptr.writeBlockVectorData (data_id, size, _vertex_ids.data, _values.data) + self.thisptr.writeBlockVectorData (data_id, size, vertex_ids_np.data, values_np.data) def write_vector_data (self, data_id, vertex_id, value): """ @@ -967,9 +967,9 @@ cdef class Interface: assert dimensions == self.get_dimensions(), "Dimensions of vector data in write_vector_data does not match with dimensions in problem definition. 
Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) - cdef np.ndarray[np.double_t, ndim=1] _value = np.ascontiguousarray(value, dtype=np.double) + cdef np.ndarray[np.double_t, ndim=1] value_np = np.ascontiguousarray(value, dtype=np.double) - self.thisptr.writeVectorData (data_id, vertex_id, _value.data) + self.thisptr.writeVectorData (data_id, vertex_id, value_np.data) def write_block_scalar_data (self, data_id, vertex_ids, values): """ @@ -1008,12 +1008,12 @@ cdef class Interface: if len(values) == 0: size = 0 - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values, dtype=np.double) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] values_np = np.ascontiguousarray(values, dtype=np.double) - assert _values.size == size, "Scalar data is not provided for all vertices in write_block_scalar_data. Check size of input data provided. Provided size: {}, expected size: {}".format(_values.size, size) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_data. Check size of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - self.thisptr.writeBlockScalarData (data_id, size, _vertex_ids.data, _values.data) + assert values_np.size == size, "Scalar data is not provided for all vertices in write_block_scalar_data. Check size of input data provided. Provided size: {}, expected size: {}".format(values_np.size, size) + assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_scalar_data. Check size of vertex ids input. 
Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) + self.thisptr.writeBlockScalarData (data_id, size, vertex_ids_np.data, values_np.data) def write_scalar_data (self, data_id, vertex_id, double value): """ @@ -1088,15 +1088,15 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "read_block_vector_data") - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = _vertex_ids.size + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = vertex_ids_np.size dimensions = self.get_dimensions() - cdef np.ndarray[np.double_t, ndim=1] _values = np.empty(size * dimensions, dtype=np.double) + cdef np.ndarray[np.double_t, ndim=1] values_np = np.empty(size * dimensions, dtype=np.double) if relative_read_time is None: - self.thisptr.readBlockVectorData (data_id, size, _vertex_ids.data, _values.data) + self.thisptr.readBlockVectorData (data_id, size, vertex_ids_np.data, values_np.data) else: - self.thisptr.readBlockVectorData (data_id, size, _vertex_ids.data, relative_read_time, _values.data) - return _values.reshape((size, dimensions)) + self.thisptr.readBlockVectorData (data_id, size, vertex_ids_np.data, relative_read_time, values_np.data) + return values_np.reshape((size, dimensions)) def read_vector_data (self, data_id, vertex_id, relative_read_time=None): """ @@ -1140,12 +1140,12 @@ cdef class Interface: (1, 3) """ dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _value = np.empty(dimensions, dtype=np.double) + cdef np.ndarray[double, ndim=1] value_np = np.empty(dimensions, dtype=np.double) if relative_read_time == None: - self.thisptr.readVectorData (data_id, vertex_id, _value.data) + self.thisptr.readVectorData (data_id, vertex_id, value_np.data) else: - self.thisptr.readVectorData (data_id, vertex_id, relative_read_time, _value.data) - return _value + self.thisptr.readVectorData (data_id, vertex_id, relative_read_time, 
value_np.data) + return value_np def read_block_scalar_data (self, data_id, vertex_ids, relative_read_time=None): """ @@ -1185,15 +1185,15 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "read_block_scalar_data") - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = _vertex_ids.size - cdef np.ndarray[double, ndim=1] _values = np.empty(size, dtype=np.double) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = vertex_ids_np.size + cdef np.ndarray[double, ndim=1] values_np = np.empty(size, dtype=np.double) if relative_read_time == None: - self.thisptr.readBlockScalarData (data_id, size, _vertex_ids.data, _values.data) + self.thisptr.readBlockScalarData (data_id, size, vertex_ids_np.data, values_np.data) else: - self.thisptr.readBlockScalarData (data_id, size, _vertex_ids.data, relative_read_time, _values.data) + self.thisptr.readBlockScalarData (data_id, size, vertex_ids_np.data, relative_read_time, values_np.data) - return _values + return values_np def read_scalar_data (self, data_id, vertex_id, relative_read_time=None): """ @@ -1225,13 +1225,13 @@ cdef class Interface: >>> vertex_id = 5 >>> value = interface.read_scalar_data(data_id, vertex_id) """ - cdef double _value + cdef double value_np if relative_read_time == None: - self.thisptr.readScalarData (data_id, vertex_id, _value) + self.thisptr.readScalarData (data_id, vertex_id, value_np) else: - self.thisptr.readScalarData (data_id, vertex_id, relative_read_time, _value) + self.thisptr.readScalarData (data_id, vertex_id, relative_read_time, value_np) - return _value + return value_np def write_block_vector_gradient_data (self, data_id, vertex_ids, gradientValues): """ @@ -1282,13 +1282,13 @@ cdef class Interface: if len(gradientValues) == 0: size = 0 - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] 
_gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert _gradientValues.size == size * self.get_dimensions() * self.get_dimensions(), "Dimension of vector gradient data provided in write_block_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions() * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + assert gradientValues_np.size == size * self.get_dimensions() * self.get_dimensions(), "Dimension of vector gradient data provided in write_block_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, size * self.get_dimensions() * self.get_dimensions()) + assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_vector_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) - self.thisptr.writeBlockVectorGradientData (data_id, size, _vertex_ids.data, _gradientValues.data) + self.thisptr.writeBlockVectorGradientData (data_id, size, vertex_ids_np.data, gradientValues_np.data) def write_scalar_gradient_data (self, data_id, vertex_id, gradientValues): """ @@ -1335,11 +1335,11 @@ cdef class Interface: if not isinstance(gradientValues, np.ndarray): gradientValues = np.asarray(gradientValues) - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert _gradientValues.size == self.get_dimensions(), "Vector data provided for vertex {} in write_scalar_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions()) + assert gradientValues_np.size == self.get_dimensions(), "Vector data provided for vertex {} in write_scalar_gradient_data does not match problem definition. Check length of input data provided. 
Provided size: {}, expected size: {}".format(gradientValues_np.size, self.get_dimensions()) - self.thisptr.writeScalarGradientData(data_id, vertex_id, _gradientValues.data) + self.thisptr.writeScalarGradientData(data_id, vertex_id, gradientValues_np.data) def write_vector_gradient_data (self, data_id, vertex_id, gradientValues): """ @@ -1386,11 +1386,11 @@ cdef class Interface: if not isinstance(gradientValues, np.ndarray): gradientValues = np.asarray(gradientValues) - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert _gradientValues.size == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector gradient data provided for vertex {} in write_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions() * self.get_dimensions()) + assert gradientValues_np.size == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector gradient data provided for vertex {} in write_vector_gradient_data does not match problem definition. Check length of input data provided. 
Provided size: {}, expected size: {}".format(gradientValues_np.size, self.get_dimensions() * self.get_dimensions()) - self.thisptr.writeVectorGradientData(data_id, vertex_id, _gradientValues.data) + self.thisptr.writeVectorGradientData(data_id, vertex_id, gradientValues_np.data) def write_block_scalar_gradient_data (self, data_id, vertex_ids, gradientValues): """ @@ -1441,13 +1441,13 @@ cdef class Interface: if len(gradientValues) == 0: size = 0 - cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert _gradientValues.size == size * self.get_dimensions(), "Scalar gradient data is not provided for all vertices in write_block_scalar_gradient_data. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions()) - assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + assert gradientValues_np.size == size * self.get_dimensions(), "Scalar gradient data is not provided for all vertices in write_block_scalar_gradient_data. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, size * self.get_dimensions()) + assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_scalar_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) - self.thisptr.writeBlockScalarGradientData (data_id, size, _vertex_ids.data, _gradientValues.data) + self.thisptr.writeBlockScalarGradientData (data_id, size, vertex_ids_np.data, gradientValues_np.data) def is_gradient_data_required(self,data_id): """ @@ -1526,9 +1526,9 @@ cdef class Interface: assert len(bounding_box) == (self.get_dimensions() * 2), "Dimensions of bounding box in set_mesh_access_region does not match with dimensions in problem definition." - cdef np.ndarray[double, ndim=1] _bounding_box = np.ascontiguousarray(bounding_box, dtype=np.double) + cdef np.ndarray[double, ndim=1] bounding_box_np = np.ascontiguousarray(bounding_box, dtype=np.double) - self.thisptr.setMeshAccessRegion(mesh_id, _bounding_box.data) + self.thisptr.setMeshAccessRegion(mesh_id, bounding_box_np.data) def get_mesh_vertices_and_ids (self, mesh_id): """ @@ -1550,13 +1550,13 @@ cdef class Interface: warnings.warn("The function get_mesh_vertices_and_ids is still experimental.") size = self.get_mesh_vertex_size(mesh_id) - cdef np.ndarray[int, ndim=1] _ids = np.empty(size, dtype=np.int32) + cdef np.ndarray[int, ndim=1] ids_np = np.empty(size, dtype=np.int32) dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] _coordinates = np.empty(size*dimensions, dtype=np.double) + cdef np.ndarray[double, ndim=1] coordinates_np = np.empty(size*dimensions, dtype=np.double) - self.thisptr.getMeshVerticesAndIDs(mesh_id, size, _ids.data, _coordinates.data) + self.thisptr.getMeshVerticesAndIDs(mesh_id, size, ids_np.data, coordinates_np.data) - return _ids, _coordinates.reshape((size, dimensions)) + return ids_np, coordinates_np.reshape((size, dimensions)) def get_version_information (): """ From 5814ee6386c26ab1d5e1d96d5c658a98a853dc2e Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 25 Jul 2023 11:18:10 +0200 Subject: [PATCH 11/33] Use precice/precice:latest in GitHub Actions CI --- 
.github/workflows/build-and-test.yml | 10 +++++----- .github/workflows/run-solverdummy.yml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 7b182dc6..211b25cc 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -13,7 +13,7 @@ jobs: needs: [setup_test] runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository @@ -37,7 +37,7 @@ jobs: name: Run setup install runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository @@ -62,7 +62,7 @@ jobs: needs: [setup_install] runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository @@ -119,7 +119,7 @@ jobs: needs: [setup_test] runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository @@ -140,7 +140,7 @@ jobs: needs: [setup_install, setup_test] runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository diff --git a/.github/workflows/run-solverdummy.yml b/.github/workflows/run-solverdummy.yml index 1e00b9a5..08b04d51 100644 --- a/.github/workflows/run-solverdummy.yml +++ b/.github/workflows/run-solverdummy.yml @@ -11,7 +11,7 @@ jobs: name: Run solverdummies runs-on: ubuntu-latest container: - image: benjaminrodenberg/precice:2.5.0 + image: precice/precice:latest options: --user root steps: - name: Checkout Repository From 6f1f9f575a435fe40b94f5c194f120beebc24e5c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 25 Jul 2023 11:26:32 +0200 Subject: [PATCH 12/33] bump 
version in CHANGELOG --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a89de7e..8205aa17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ All notable changes to this project will be documented in this file. ## latest +## 2.5.0.3 + +* Stop using `_` before variables names which are defined using `cdef`. Necessary for compatibility with [Cython v3.0.0](https://cython.readthedocs.io/en/latest/src/userguide/migrating_to_cy30.html#) + ## 2.5.0.2 * Add Waveform API introduced in preCICE v2.4.0. From e63054760dfe8dc25700eb6b6246de2220aca2d1 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 25 Jul 2023 13:01:19 +0200 Subject: [PATCH 13/33] Revert "Removing underscores from all variables defined with cdef" This reverts commit b97364c857f676312365f8d31e82d3f2fadd6747. --- cyprecice/cyprecice.pyx | 130 ++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 575435f1..9a6c3a70 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -460,8 +460,8 @@ cdef class Interface: elif len(position) == 0: dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] position_np = np.ascontiguousarray(position, dtype=np.double) - vertex_id = self.thisptr.setMeshVertex(mesh_id, position_np.data) + cdef np.ndarray[double, ndim=1] _position = np.ascontiguousarray(position, dtype=np.double) + vertex_id = self.thisptr.setMeshVertex(mesh_id, _position.data) return vertex_id def get_mesh_vertex_size (self, mesh_id): @@ -538,9 +538,9 @@ cdef class Interface: size = positions.shape[0] dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] positions_np = np.ascontiguousarray(positions.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] _positions = np.ascontiguousarray(positions.flatten(), dtype=np.double) cdef np.ndarray[int, ndim=1] vertex_ids = np.empty(size, dtype=np.int32) - 
self.thisptr.setMeshVertices (mesh_id, size, positions_np.data, vertex_ids.data) + self.thisptr.setMeshVertices (mesh_id, size, _positions.data, vertex_ids.data) return vertex_ids def get_mesh_vertices(self, mesh_id, vertex_ids): @@ -584,11 +584,11 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "get_mesh_vertices") - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = vertex_ids_np.size - cdef np.ndarray[double, ndim=1] positions_np = np.empty(size * self.get_dimensions(), dtype=np.double) - self.thisptr.getMeshVertices (mesh_id, size, vertex_ids_np.data, positions_np.data) - return positions_np.reshape((size, self.get_dimensions())) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = _vertex_ids.size + cdef np.ndarray[double, ndim=1] _positions = np.empty(size * self.get_dimensions(), dtype=np.double) + self.thisptr.getMeshVertices (mesh_id, size, _vertex_ids.data, _positions.data) + return _positions.reshape((size, self.get_dimensions())) def get_mesh_vertex_ids_from_positions (self, mesh_id, positions): """ @@ -648,9 +648,9 @@ cdef class Interface: size = positions.shape[0] dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] positions_np = np.ascontiguousarray(positions.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] _positions = np.ascontiguousarray(positions.flatten(), dtype=np.double) cdef np.ndarray[int, ndim=1] vertex_ids = np.empty(int(size), dtype=np.int32) - self.thisptr.getMeshVertexIDsFromPositions (mesh_id, size, positions_np.data, vertex_ids.data) + self.thisptr.getMeshVertexIDsFromPositions (mesh_id, size, _positions.data, vertex_ids.data) return vertex_ids def set_mesh_edge (self, mesh_id, first_vertex_id, second_vertex_id): @@ -916,13 +916,13 @@ cdef class Interface: if len(values) == 0: size = 0 - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - 
cdef np.ndarray[double, ndim=1] values_np = np.ascontiguousarray(values.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values.flatten(), dtype=np.double) - assert values_np.size == size * self.get_dimensions(), "Vector data is not provided for all vertices in write_block_vector_data. Check length of input data provided. Provided size: {}, expected size: {}".format(values_np.size, size * self.get_dimensions()) - assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_vector_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) + assert _values.size == size * self.get_dimensions(), "Vector data is not provided for all vertices in write_block_vector_data. Check length of input data provided. Provided size: {}, expected size: {}".format(_values.size, size * self.get_dimensions()) + assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - self.thisptr.writeBlockVectorData (data_id, size, vertex_ids_np.data, values_np.data) + self.thisptr.writeBlockVectorData (data_id, size, _vertex_ids.data, _values.data) def write_vector_data (self, data_id, vertex_id, value): """ @@ -967,9 +967,9 @@ cdef class Interface: assert dimensions == self.get_dimensions(), "Dimensions of vector data in write_vector_data does not match with dimensions in problem definition. 
Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_dimensions()) - cdef np.ndarray[np.double_t, ndim=1] value_np = np.ascontiguousarray(value, dtype=np.double) + cdef np.ndarray[np.double_t, ndim=1] _value = np.ascontiguousarray(value, dtype=np.double) - self.thisptr.writeVectorData (data_id, vertex_id, value_np.data) + self.thisptr.writeVectorData (data_id, vertex_id, _value.data) def write_block_scalar_data (self, data_id, vertex_ids, values): """ @@ -1008,12 +1008,12 @@ cdef class Interface: if len(values) == 0: size = 0 - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] values_np = np.ascontiguousarray(values, dtype=np.double) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] _values = np.ascontiguousarray(values, dtype=np.double) - assert values_np.size == size, "Scalar data is not provided for all vertices in write_block_scalar_data. Check size of input data provided. Provided size: {}, expected size: {}".format(values_np.size, size) - assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_scalar_data. Check size of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) - self.thisptr.writeBlockScalarData (data_id, size, vertex_ids_np.data, values_np.data) + assert _values.size == size, "Scalar data is not provided for all vertices in write_block_scalar_data. Check size of input data provided. Provided size: {}, expected size: {}".format(_values.size, size) + assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_data. Check size of vertex ids input. 
Provided size: {}, expected size: {}".format(_vertex_ids.size, size) + self.thisptr.writeBlockScalarData (data_id, size, _vertex_ids.data, _values.data) def write_scalar_data (self, data_id, vertex_id, double value): """ @@ -1088,15 +1088,15 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "read_block_vector_data") - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = vertex_ids_np.size + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = _vertex_ids.size dimensions = self.get_dimensions() - cdef np.ndarray[np.double_t, ndim=1] values_np = np.empty(size * dimensions, dtype=np.double) + cdef np.ndarray[np.double_t, ndim=1] _values = np.empty(size * dimensions, dtype=np.double) if relative_read_time is None: - self.thisptr.readBlockVectorData (data_id, size, vertex_ids_np.data, values_np.data) + self.thisptr.readBlockVectorData (data_id, size, _vertex_ids.data, _values.data) else: - self.thisptr.readBlockVectorData (data_id, size, vertex_ids_np.data, relative_read_time, values_np.data) - return values_np.reshape((size, dimensions)) + self.thisptr.readBlockVectorData (data_id, size, _vertex_ids.data, relative_read_time, _values.data) + return _values.reshape((size, dimensions)) def read_vector_data (self, data_id, vertex_id, relative_read_time=None): """ @@ -1140,12 +1140,12 @@ cdef class Interface: (1, 3) """ dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] value_np = np.empty(dimensions, dtype=np.double) + cdef np.ndarray[double, ndim=1] _value = np.empty(dimensions, dtype=np.double) if relative_read_time == None: - self.thisptr.readVectorData (data_id, vertex_id, value_np.data) + self.thisptr.readVectorData (data_id, vertex_id, _value.data) else: - self.thisptr.readVectorData (data_id, vertex_id, relative_read_time, value_np.data) - return value_np + self.thisptr.readVectorData (data_id, vertex_id, relative_read_time, 
_value.data) + return _value def read_block_scalar_data (self, data_id, vertex_ids, relative_read_time=None): """ @@ -1185,15 +1185,15 @@ cdef class Interface: """ check_array_like(vertex_ids, "vertex_ids", "read_block_scalar_data") - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - size = vertex_ids_np.size - cdef np.ndarray[double, ndim=1] values_np = np.empty(size, dtype=np.double) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + size = _vertex_ids.size + cdef np.ndarray[double, ndim=1] _values = np.empty(size, dtype=np.double) if relative_read_time == None: - self.thisptr.readBlockScalarData (data_id, size, vertex_ids_np.data, values_np.data) + self.thisptr.readBlockScalarData (data_id, size, _vertex_ids.data, _values.data) else: - self.thisptr.readBlockScalarData (data_id, size, vertex_ids_np.data, relative_read_time, values_np.data) + self.thisptr.readBlockScalarData (data_id, size, _vertex_ids.data, relative_read_time, _values.data) - return values_np + return _values def read_scalar_data (self, data_id, vertex_id, relative_read_time=None): """ @@ -1225,13 +1225,13 @@ cdef class Interface: >>> vertex_id = 5 >>> value = interface.read_scalar_data(data_id, vertex_id) """ - cdef double value_np + cdef double _value if relative_read_time == None: - self.thisptr.readScalarData (data_id, vertex_id, value_np) + self.thisptr.readScalarData (data_id, vertex_id, _value) else: - self.thisptr.readScalarData (data_id, vertex_id, relative_read_time, value_np) + self.thisptr.readScalarData (data_id, vertex_id, relative_read_time, _value) - return value_np + return _value def write_block_vector_gradient_data (self, data_id, vertex_ids, gradientValues): """ @@ -1282,13 +1282,13 @@ cdef class Interface: if len(gradientValues) == 0: size = 0 - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] 
gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert gradientValues_np.size == size * self.get_dimensions() * self.get_dimensions(), "Dimension of vector gradient data provided in write_block_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, size * self.get_dimensions() * self.get_dimensions()) - assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_vector_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) + assert _gradientValues.size == size * self.get_dimensions() * self.get_dimensions(), "Dimension of vector gradient data provided in write_block_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions() * self.get_dimensions()) + assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_vector_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - self.thisptr.writeBlockVectorGradientData (data_id, size, vertex_ids_np.data, gradientValues_np.data) + self.thisptr.writeBlockVectorGradientData (data_id, size, _vertex_ids.data, _gradientValues.data) def write_scalar_gradient_data (self, data_id, vertex_id, gradientValues): """ @@ -1335,11 +1335,11 @@ cdef class Interface: if not isinstance(gradientValues, np.ndarray): gradientValues = np.asarray(gradientValues) - cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert gradientValues_np.size == self.get_dimensions(), "Vector data provided for vertex {} in write_scalar_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, self.get_dimensions()) + assert _gradientValues.size == self.get_dimensions(), "Vector data provided for vertex {} in write_scalar_gradient_data does not match problem definition. Check length of input data provided. 
Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions()) - self.thisptr.writeScalarGradientData(data_id, vertex_id, gradientValues_np.data) + self.thisptr.writeScalarGradientData(data_id, vertex_id, _gradientValues.data) def write_vector_gradient_data (self, data_id, vertex_id, gradientValues): """ @@ -1386,11 +1386,11 @@ cdef class Interface: if not isinstance(gradientValues, np.ndarray): gradientValues = np.asarray(gradientValues) - cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert gradientValues_np.size == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector gradient data provided for vertex {} in write_vector_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, self.get_dimensions() * self.get_dimensions()) + assert _gradientValues.size == self.get_dimensions() * self.get_dimensions(), "Dimensions of vector gradient data provided for vertex {} in write_vector_gradient_data does not match problem definition. Check length of input data provided. 
Provided size: {}, expected size: {}".format(_gradientValues.size, self.get_dimensions() * self.get_dimensions()) - self.thisptr.writeVectorGradientData(data_id, vertex_id, gradientValues_np.data) + self.thisptr.writeVectorGradientData(data_id, vertex_id, _gradientValues.data) def write_block_scalar_gradient_data (self, data_id, vertex_ids, gradientValues): """ @@ -1441,13 +1441,13 @@ cdef class Interface: if len(gradientValues) == 0: size = 0 - cdef np.ndarray[int, ndim=1] vertex_ids_np = np.ascontiguousarray(vertex_ids, dtype=np.int32) - cdef np.ndarray[double, ndim=1] gradientValues_np = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) + cdef np.ndarray[int, ndim=1] _vertex_ids = np.ascontiguousarray(vertex_ids, dtype=np.int32) + cdef np.ndarray[double, ndim=1] _gradientValues = np.ascontiguousarray(gradientValues.flatten(), dtype=np.double) - assert gradientValues_np.size == size * self.get_dimensions(), "Scalar gradient data is not provided for all vertices in write_block_scalar_gradient_data. Check length of input data provided. Provided size: {}, expected size: {}".format(gradientValues_np.size, size * self.get_dimensions()) - assert vertex_ids_np.size == size, "Vertex IDs are of incorrect length in write_block_scalar_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(vertex_ids_np.size, size) + assert _gradientValues.size == size * self.get_dimensions(), "Scalar gradient data is not provided for all vertices in write_block_scalar_gradient_data. Check length of input data provided. Provided size: {}, expected size: {}".format(_gradientValues.size, size * self.get_dimensions()) + assert _vertex_ids.size == size, "Vertex IDs are of incorrect length in write_block_scalar_gradient_data. Check length of vertex ids input. 
Provided size: {}, expected size: {}".format(_vertex_ids.size, size) - self.thisptr.writeBlockScalarGradientData (data_id, size, vertex_ids_np.data, gradientValues_np.data) + self.thisptr.writeBlockScalarGradientData (data_id, size, _vertex_ids.data, _gradientValues.data) def is_gradient_data_required(self,data_id): """ @@ -1526,9 +1526,9 @@ cdef class Interface: assert len(bounding_box) == (self.get_dimensions() * 2), "Dimensions of bounding box in set_mesh_access_region does not match with dimensions in problem definition." - cdef np.ndarray[double, ndim=1] bounding_box_np = np.ascontiguousarray(bounding_box, dtype=np.double) + cdef np.ndarray[double, ndim=1] _bounding_box = np.ascontiguousarray(bounding_box, dtype=np.double) - self.thisptr.setMeshAccessRegion(mesh_id, bounding_box_np.data) + self.thisptr.setMeshAccessRegion(mesh_id, _bounding_box.data) def get_mesh_vertices_and_ids (self, mesh_id): """ @@ -1550,13 +1550,13 @@ cdef class Interface: warnings.warn("The function get_mesh_vertices_and_ids is still experimental.") size = self.get_mesh_vertex_size(mesh_id) - cdef np.ndarray[int, ndim=1] ids_np = np.empty(size, dtype=np.int32) + cdef np.ndarray[int, ndim=1] _ids = np.empty(size, dtype=np.int32) dimensions = self.get_dimensions() - cdef np.ndarray[double, ndim=1] coordinates_np = np.empty(size*dimensions, dtype=np.double) + cdef np.ndarray[double, ndim=1] _coordinates = np.empty(size*dimensions, dtype=np.double) - self.thisptr.getMeshVerticesAndIDs(mesh_id, size, ids_np.data, coordinates_np.data) + self.thisptr.getMeshVerticesAndIDs(mesh_id, size, _ids.data, _coordinates.data) - return ids_np, coordinates_np.reshape((size, dimensions)) + return _ids, _coordinates.reshape((size, dimensions)) def get_version_information (): """ From 90fdcf964e8743dbe1a47e1597fe746646749d6e Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 26 Jul 2023 14:36:43 +0200 Subject: [PATCH 14/33] Add cimport numpy to resolve seg fault originating from using Cython --- 
CHANGELOG.md | 2 +- cyprecice/cyprecice.pyx | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8205aa17..724cf985 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ All notable changes to this project will be documented in this file. ## 2.5.0.3 -* Stop using `_` before variables names which are defined using `cdef`. Necessary for compatibility with [Cython v3.0.0](https://cython.readthedocs.io/en/latest/src/userguide/migrating_to_cy30.html#) +* Add `cimport numpy` to avoid a segmentation fault originating from using Cython v3.0.0. ## 2.5.0.2 diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 9a6c3a70..056f9b2a 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -6,6 +6,7 @@ The python module precice offers python language bindings to the C++ coupling li """ cimport cyprecice +cimport numpy import numpy as np from mpi4py import MPI import warnings From 117abe11ff5eb93ee3bd37e8b8d750bbbb011313 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 26 Jul 2023 15:11:52 +0200 Subject: [PATCH 15/33] Update the changelog --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 724cf985..4b0d190f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,9 @@ All notable changes to this project will be documented in this file. -## latest - ## 2.5.0.3 -* Add `cimport numpy` to avoid a segmentation fault originating from using Cython v3.0.0. +* Add `cimport numpy` to avoid a segmentation fault originating from using Cython v3.0.0. https://github.com/precice/python-bindings/issues/182 ## 2.5.0.2 From 64af1b9c6ee776cb5496f0bfa555f486441d2be6 Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Wed, 26 Jul 2023 15:56:00 +0200 Subject: [PATCH 16/33] Update versioneer from 0.19 to 0.29. 
--- CHANGELOG.md | 1 + precice/__init__.py | 5 +- precice/_version.py | 318 ++++++++++---- versioneer.py | 1026 ++++++++++++++++++++++++++++++------------- 4 files changed, 964 insertions(+), 386 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b0d190f..7db41cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ All notable changes to this project will be documented in this file. ## 2.5.0.3 +* Update from versioneer 0.19 to 0.29. * Add `cimport numpy` to avoid a segmentation fault originating from using Cython v3.0.0. https://github.com/precice/python-bindings/issues/182 ## 2.5.0.2 diff --git a/precice/__init__.py b/precice/__init__.py index 8c28e592..70ffa0a0 100644 --- a/precice/__init__.py +++ b/precice/__init__.py @@ -11,6 +11,5 @@ def SolverInterface(*args): warnings.warn("please use precice.Interface to create the interface to C++ preCICE. Note that this function (precice.SolverInterface) does not do anything but throwing this warning. See https://github.com/precice/python-bindings/issues/92 for more information.") -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions +from . import _version +__version__ = _version.get_versions()['version'] diff --git a/precice/_version.py b/precice/_version.py index a99bcb1f..81f74377 100644 --- a/precice/_version.py +++ b/precice/_version.py @@ -5,8 +5,9 @@ # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. -# This file is released into the public domain. Generated by -# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) +# This file is released into the public domain. 
+# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" @@ -15,9 +16,11 @@ import re import subprocess import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools -def get_keywords(): +def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must @@ -33,8 +36,15 @@ def get_keywords(): class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool -def get_config(): + +def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py @@ -52,13 +62,13 @@ class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} @@ -67,22 +77,35 @@ def decorate(f): return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in 
commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if verbose: @@ -93,16 +116,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip().decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. 
Source tarballs conventionally unpack into a directory that includes both @@ -111,15 +138,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % @@ -128,39 +154,42 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
- keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature @@ -179,11 +208,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". 
If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -192,7 +221,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -201,6 +230,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %s" % r) return {"version": r, @@ -216,7 +250,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. 
This only gets called if the git-archive 'subst' keywords were *not* @@ -227,8 +266,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -236,24 +282,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git 
rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -270,7 +349,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces @@ -295,13 +374,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] @@ -310,14 +387,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): return pieces -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -342,23 +419,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post0.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post0.devDISTANCE + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post0.dev%d" % pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that .dev0 sorts backwards @@ -385,7 +510,36 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. @@ -407,7 +561,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -427,7 +581,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. 
@@ -447,7 +601,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -461,10 +615,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -479,7 +637,7 @@ def render(pieces, style): "date": pieces.get("date")} -def get_versions(): +def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some @@ -500,7 +658,7 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): + for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, diff --git a/versioneer.py b/versioneer.py index 6b03bb75..1e3753e6 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,5 +1,5 @@ -# Version: 0.19 +# Version: 0.29 """The Versioneer - like a rocketeer, but for versions. @@ -9,12 +9,12 @@ * like a rocketeer, but for versions! 
* https://github.com/python-versioneer/python-versioneer * Brian Warner -* License: Public Domain -* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3 +* License: Public Domain (Unlicense) +* Compatible with: Python 3.7, 3.8, 3.9, 3.10, 3.11 and pypy3 * [![Latest Version][pypi-image]][pypi-url] * [![Build Status][travis-image]][travis-url] -This is a tool for managing a recorded version number in distutils-based +This is a tool for managing a recorded version number in setuptools-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control @@ -23,10 +23,38 @@ ## Quick Install +Versioneer provides two installation modes. The "classic" vendored mode installs +a copy of versioneer into your repository. The experimental build-time dependency mode +is intended to allow you to skip this step and simplify the process of upgrading. 
+ +### Vendored mode + +* `pip install versioneer` to somewhere in your $PATH + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) + * Note that you will need to add `tomli; python_version < "3.11"` to your + build-time dependencies if you use `pyproject.toml` +* run `versioneer install --vendor` in your source tree, commit the results +* verify version information with `python setup.py version` + +### Build-time dependency mode + * `pip install versioneer` to somewhere in your $PATH -* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md)) -* run `versioneer install` in your source tree, commit the results -* Verify version information with `python setup.py version` + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) +* add `versioneer` (with `[toml]` extra, if configuring in `pyproject.toml`) + to the `requires` key of the `build-system` table in `pyproject.toml`: + ```toml + [build-system] + requires = ["setuptools", "versioneer[toml]"] + build-backend = "setuptools.build_meta" + ``` +* run `versioneer install --no-vendor` in your source tree, commit the results +* verify version information with `python setup.py version` ## Version Identifiers @@ -231,9 +259,10 @@ To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. 
See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace +* edit `setup.cfg` and `pyproject.toml`, if necessary, + to include any new configuration settings indicated by the release notes. + See [UPGRADING](./UPGRADING.md) for details. +* re-run `versioneer install --[no-]vendor` in your source tree, to replace `SRC/_version.py` * commit any changed files @@ -256,14 +285,15 @@ dependency * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of versioneer +* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools + plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . +Specifically, both are released under the "Unlicense", as described in +https://unlicense.org/. 
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg [pypi-url]: https://pypi.python.org/pypi/versioneer/ @@ -272,6 +302,11 @@ [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ +# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring +# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements +# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error +# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with +# pylint:disable=attribute-defined-outside-init,too-many-arguments import configparser import errno @@ -280,13 +315,34 @@ import re import subprocess import sys +from pathlib import Path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union +from typing import NoReturn +import functools + +have_tomllib = True +if sys.version_info >= (3, 11): + import tomllib +else: + try: + import tomli as tomllib + except ImportError: + have_tomllib = False class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + versionfile_source: str + versionfile_build: Optional[str] + parentdir_prefix: Optional[str] + verbose: Optional[bool] -def get_root(): + +def get_root() -> str: """Get the project root directory. We require that all commands are run from the project root, i.e. 
the @@ -294,13 +350,23 @@ def get_root(): """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " @@ -314,43 +380,62 @@ def get_root(): # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. 
- me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) + my_path = os.path.realpath(os.path.abspath(__file__)) + me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: + if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root -def get_config_from_root(root): +def get_config_from_root(root: str) -> VersioneerConfig: """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or + # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
- setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.ConfigParser() - with open(setup_cfg, "r") as f: - parser.read_file(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None + root_pth = Path(root) + pyproject_toml = root_pth / "pyproject.toml" + setup_cfg = root_pth / "setup.cfg" + section: Union[Dict[str, Any], configparser.SectionProxy, None] = None + if pyproject_toml.exists() and have_tomllib: + try: + with open(pyproject_toml, 'rb') as fobj: + pp = tomllib.load(fobj) + section = pp['tool']['versioneer'] + except (tomllib.TOMLDecodeError, KeyError) as e: + print(f"Failed to load config from {pyproject_toml}: {e}") + print("Try to load it from setup.cfg") + if not section: + parser = configparser.ConfigParser() + with open(setup_cfg) as cfg_file: + parser.read_file(cfg_file) + parser.get("versioneer", "VCS") # raise error if missing + + section = parser["versioneer"] + + # `cast`` really shouldn't be used, but its simplest for the + # common VersioneerConfig users at the moment. 
We verify against + # `None` values elsewhere where it matters + cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): + cfg.VCS = section['VCS'] + cfg.style = section.get("style", "") + cfg.versionfile_source = cast(str, section.get("versionfile_source")) + cfg.versionfile_build = section.get("versionfile_build") + cfg.tag_prefix = cast(str, section.get("tag_prefix")) + if cfg.tag_prefix in ("''", '""', None): cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") + cfg.parentdir_prefix = section.get("parentdir_prefix") + if isinstance(section, configparser.SectionProxy): + # Make sure configparser translates to bool + cfg.verbose = section.getboolean("verbose") + else: + cfg.verbose = section.get("verbose") + return cfg @@ -359,37 +444,48 @@ class NotThisMethod(Exception): # these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f + HANDLERS.setdefault(vcs, {})[method] = f return f return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: 
"""Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if verbose: @@ -400,13 +496,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip().decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode LONG_VERSION_PY['git'] = r''' @@ -416,8 +512,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. -# This file is released into the public domain. Generated by -# versioneer-0.19 (https://github.com/python-versioneer/python-versioneer) +# This file is released into the public domain. 
+# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" @@ -426,9 +523,11 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, import re import subprocess import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools -def get_keywords(): +def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must @@ -444,8 +543,15 @@ def get_keywords(): class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool + -def get_config(): +def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py @@ -463,13 +569,13 @@ class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" - def decorate(f): + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} @@ -478,22 +584,35 @@ def decorate(f): return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: 
"""Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if verbose: @@ -504,16 +623,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None - stdout = p.communicate()[0].strip().decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. 
Source tarballs conventionally unpack into a directory that includes both @@ -522,15 +645,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% @@ -539,39 +661,42 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
- keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature @@ -590,11 +715,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". 
If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d @@ -603,7 +728,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: @@ -612,6 +737,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %%s" %% r) return {"version": r, @@ -627,7 +757,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. 
This only gets called if the git-archive 'subst' keywords were *not* @@ -638,8 +773,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) @@ -647,24 +789,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git 
rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -681,7 +856,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces @@ -706,13 +881,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] @@ -721,14 +894,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): return pieces -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -753,23 +926,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post0.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post0.devDISTANCE + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post0.dev%%d" %% pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%%d" %% (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that .dev0 sorts backwards @@ -796,7 +1017,36 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. @@ -818,7 +1068,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -838,7 +1088,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. 
@@ -858,7 +1108,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -872,10 +1122,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -890,7 +1144,7 @@ def render(pieces, style): "date": pieces.get("date")} -def get_versions(): +def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some @@ -911,7 +1165,7 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): + for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, @@ -938,39 +1192,42 @@ def get_versions(): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
- keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature @@ -989,11 +1246,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". 
If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -1002,7 +1259,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1011,6 +1268,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %s" % r) return {"version": r, @@ -1026,7 +1288,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. 
This only gets called if the git-archive 'subst' keywords were *not* @@ -1037,8 +1304,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,24 +1320,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git 
rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -1080,7 +1387,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces @@ -1105,13 +1412,11 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] @@ -1120,7 +1425,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): return pieces -def do_vcs_install(manifest_in, versionfile_source, ipy): +def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None: """Git-specific installation logic for Versioneer. 
For Git, this means creating/changing .gitattributes to mark _version.py @@ -1129,36 +1434,40 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] + files = [versionfile_source] if ipy: files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) + if "VERSIONEER_PEP518" not in globals(): + try: + my_path = __file__ + if my_path.endswith((".pyc", ".pyo")): + my_path = os.path.splitext(my_path)[0] + ".py" + versioneer_file = os.path.relpath(my_path) + except NameError: + versioneer_file = "versioneer.py" + files.append(versioneer_file) present = False try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: + with open(".gitattributes", "r") as fobj: + for line in fobj: + if line.strip().startswith(versionfile_source): + if "export-subst" in line.strip().split()[1:]: + present = True + break + except OSError: pass if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() + with open(".gitattributes", "a+") as fobj: + fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. 
Source tarballs conventionally unpack into a directory that includes both @@ -1167,15 +1476,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % @@ -1184,7 +1492,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.19) from +# This file was generated by 'versioneer.py' (0.29) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
@@ -1201,12 +1509,12 @@ def get_versions(): """ -def versions_from_file(filename): +def versions_from_file(filename: str) -> Dict[str, Any]: """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() - except EnvironmentError: + except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) @@ -1218,9 +1526,8 @@ def versions_from_file(filename): return json.loads(mo.group(1)) -def write_to_version_file(filename, versions): +def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None: """Write the given version number to the given _version.py file.""" - os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: @@ -1229,14 +1536,14 @@ def write_to_version_file(filename, versions): print("set %s to '%s'" % (filename, versions["version"])) -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -1261,23 +1568,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post0.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post0.devDISTANCE + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post0.dev%d" % pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. 
Note that .dev0 sorts backwards @@ -1304,7 +1659,36 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. @@ -1326,7 +1710,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -1346,7 +1730,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. 
@@ -1366,7 +1750,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -1380,10 +1764,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -1402,7 +1790,7 @@ class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" -def get_versions(verbose=False): +def get_versions(verbose: bool = False) -> Dict[str, Any]: """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. @@ -1417,7 +1805,7 @@ def get_versions(verbose=False): assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose + verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None` assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" @@ -1478,13 +1866,13 @@ def get_versions(verbose=False): "date": None} -def get_version(): +def get_version() -> str: """Get the short version string for this project.""" return get_versions()["version"] -def get_cmdclass(cmdclass=None): - """Get the custom setuptools/distutils subclasses used by Versioneer. 
+def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None): + """Get the custom setuptools subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. @@ -1502,25 +1890,25 @@ def get_cmdclass(cmdclass=None): # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. - # See https://github.com/python-versioneer/python-versioneer/issues/52 + # Also see https://github.com/python-versioneer/python-versioneer/issues/52 cmds = {} if cmdclass is None else cmdclass.copy() - # we add "version" to both distutils and setuptools - from distutils.core import Command + # we add "version" to setuptools + from setuptools import Command class cmd_version(Command): description = "report generated version string" - user_options = [] - boolean_options = [] + user_options: List[Tuple[str, str, str]] = [] + boolean_options: List[str] = [] - def initialize_options(self): + def initialize_options(self) -> None: pass - def finalize_options(self): + def finalize_options(self) -> None: pass - def run(self): + def run(self) -> None: vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) @@ -1530,7 +1918,7 @@ def run(self): print(" error: %s" % vers["error"]) cmds["version"] = cmd_version - # we override "build_py" in both distutils and setuptools + # we override "build_py" in setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py @@ -1545,20 +1933,25 @@ def run(self): # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? + # pip install -e . and setuptool/editable_wheel will invoke build_py + # but the build_py command is not expected to copy any files. 
+ # we override different "build_py" commands for both environments if 'build_py' in cmds: - _build_py = cmds['build_py'] - elif "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py + _build_py: Any = cmds['build_py'] else: - from distutils.command.build_py import build_py as _build_py + from setuptools.command.build_py import build_py as _build_py class cmd_build_py(_build_py): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) + if getattr(self, "editable_mode", False): + # During editable installs `.py` and data files are + # not copied to build_lib + return # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: @@ -1569,14 +1962,12 @@ def run(self): cmds["build_py"] = cmd_build_py if 'build_ext' in cmds: - _build_ext = cmds['build_ext'] - elif "setuptools" in sys.modules: - from setuptools.command.build_ext import build_ext as _build_ext + _build_ext: Any = cmds['build_ext'] else: - from distutils.command.build_ext import build_ext as _build_ext + from setuptools.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() @@ -1589,14 +1980,21 @@ def run(self): return # now locate _version.py in the new build/ directory and replace # it with an updated value + if not cfg.versionfile_build: + return target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_source) + cfg.versionfile_build) + if not os.path.exists(target_versionfile): + print(f"Warning: {target_versionfile} does not exist, skipping " + "version update. 
This can happen if you are running build_ext " + "without first running build_py.") + return print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe + from cx_Freeze.dist import build_exe as _build_exe # type: ignore # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1605,7 +2003,7 @@ def run(self): # ... class cmd_build_exe(_build_exe): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() @@ -1628,10 +2026,13 @@ def run(self): del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? - from py2exe.distutils_buildexe import py2exe as _py2exe + try: + from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore + except ImportError: + from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore class cmd_py2exe(_py2exe): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() @@ -1652,16 +2053,51 @@ def run(self): }) cmds["py2exe"] = cmd_py2exe + # sdist farms its file list building out to egg_info + if 'egg_info' in cmds: + _egg_info: Any = cmds['egg_info'] + else: + from setuptools.command.egg_info import egg_info as _egg_info + + class cmd_egg_info(_egg_info): + def find_sources(self) -> None: + # egg_info.find_sources builds the manifest list and writes it + # in one shot + super().find_sources() + + # Modify the filelist and normalize it + root = get_root() + cfg = get_config_from_root(root) + self.filelist.append('versioneer.py') + if cfg.versionfile_source: + # There are rare cases where versionfile_source might not be + # included by default, so we must be explicit + self.filelist.append(cfg.versionfile_source) + 
self.filelist.sort() + self.filelist.remove_duplicates() + + # The write method is hidden in the manifest_maker instance that + # generated the filelist and was thrown away + # We will instead replicate their final normalization (to unicode, + # and POSIX-style paths) + from setuptools import unicode_utils + normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') + for f in self.filelist.files] + + manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') + with open(manifest_filename, 'w') as fobj: + fobj.write('\n'.join(normalized)) + + cmds['egg_info'] = cmd_egg_info + # we override different "sdist" commands for both environments if 'sdist' in cmds: - _sdist = cmds['sdist'] - elif "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist + _sdist: Any = cmds['sdist'] else: - from distutils.command.sdist import sdist as _sdist + from setuptools.command.sdist import sdist as _sdist class cmd_sdist(_sdist): - def run(self): + def run(self) -> None: versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old @@ -1669,7 +2105,7 @@ def run(self): self.distribution.metadata.version = versions["version"] return _sdist.run(self) - def make_release_tree(self, base_dir, files): + def make_release_tree(self, base_dir: str, files: List[str]) -> None: root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) @@ -1722,21 +2158,26 @@ def make_release_tree(self, base_dir, files): """ -INIT_PY_SNIPPET = """ +OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ +INIT_PY_SNIPPET = """ +from . 
import {0} +__version__ = {0}.get_versions()['version'] +""" -def do_setup(): + +def do_setup() -> int: """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, + except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): + if isinstance(e, (OSError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: @@ -1756,62 +2197,37 @@ def do_setup(): ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") + maybe_ipy: Optional[str] = ipy if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() - except EnvironmentError: + except OSError: old = "" - if INIT_PY_SNIPPET not in old: + module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] + snippet = INIT_PY_SNIPPET.format(module) + if OLD_SNIPPET in old: + print(" replacing boilerplate in %s" % ipy) + with open(ipy, "w") as f: + f.write(old.replace(OLD_SNIPPET, snippet)) + elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) + f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. 
- manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") + maybe_ipy = None # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) + do_vcs_install(cfg.versionfile_source, maybe_ipy) return 0 -def scan_setup_py(): +def scan_setup_py() -> int: """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False @@ -1848,10 +2264,14 @@ def scan_setup_py(): return errors +def setup_command() -> NoReturn: + """Set up Versioneer and exit with appropriate error code.""" + errors = do_setup() + errors += scan_setup_py() + sys.exit(1 if errors else 0) + + if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1) + setup_command() From dc4669c587ba1b3b4afd2eb62d8748b64af4896b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 26 Jul 2023 17:20:29 +0200 Subject: [PATCH 17/33] Use acceptor and connector keys instead of from and to in m2n config XML --- examples/solverdummy/precice-config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/solverdummy/precice-config.xml b/examples/solverdummy/precice-config.xml index 7f199aee..6051626a 100644 --- a/examples/solverdummy/precice-config.xml +++ b/examples/solverdummy/precice-config.xml @@ -45,7 +45,7 @@ - + From 216f9bc4c37182a9959281e55d1fc434058fbee9 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 27 Jul 2023 15:38:59 +0200 Subject: [PATCH 18/33] post-tag bump From cd446d2807b841d81a4cf5c9dd6656ab43c278c3 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 1 Aug 2023 17:52:38 -0400 Subject: [PATCH 19/33] Remove API functions has_mesh and has_data and rename get_mesh_vertices_and_ids to get_mesh_vertices_and_coordinates --- CHANGELOG.md | 4 ++++ cyprecice/Participant.pxd | 6 +----- cyprecice/cyprecice.pyx | 40 ++---------------------------------- test/Participant.cpp | 17 +-------------- test/test_bindings_module.py | 4 ++-- 5 files changed, 10 insertions(+), 61 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9a2b46b..10db1619 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ All notable changes to this project will be documented in this file. +## latest + +* Remove API functions `has_mesh` and `has_data` and rename `get_mesh_vertices_and_ids` to `get_mesh_vertices_and_coordinates`. + ## 3.0.0.0dev0 * Update to API introduced in preCICE v3.0.0. https://github.com/precice/python-bindings/pull/169 https://github.com/precice/python-bindings/pull/174 https://github.com/precice/python-bindings/pull/179 diff --git a/cyprecice/Participant.pxd b/cyprecice/Participant.pxd index 355237af..9cec7594 100644 --- a/cyprecice/Participant.pxd +++ b/cyprecice/Participant.pxd @@ -41,8 +41,6 @@ cdef extern from "precice/Participant.hpp" namespace "precice": # mesh access - bool hasMesh (const string& meshName) const - bool requiresMeshConnectivityFor (const string& meshName) const int setMeshVertex (const string& meshName, vector[double] position) @@ -69,8 +67,6 @@ cdef extern from "precice/Participant.hpp" namespace "precice": # data access - bool hasData (const string& dataName, const string& meshName) const - void writeData (const string& meshName, const string& dataName, vector[int] vertices, vector[double] values) void readData (const string& meshName, const string& dataName, vector[int] vertices, const double relativeReadTime, vector[double]& values) const @@ -79,7 +75,7 @@ cdef extern from "precice/Participant.hpp" namespace "precice": void setMeshAccessRegion (const string& meshName, vector[double] boundingBox) const - void getMeshVerticesAndIDs (const string& meshName, vector[int]& ids, vector[double]& coordinates) const + void getMeshVerticesAndCoordinates (const string& meshName, vector[int]& ids, vector[double]& coordinates) const # Gradient related API diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 303d5b44..0551bdd2 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -319,23 +319,6 @@ cdef class Participant: # mesh access - def has_mesh(self, 
mesh_name): - """ - Checks if the mesh with the given name is used by a solver. - - Parameters - ---------- - mesh_name : string - Name of the mesh. - - Returns - ------- - tag : bool - Returns true is the mesh is used. - """ - return self.thisptr.hasMesh (convert(mesh_name)) - - def requires_mesh_connectivity_for (self, mesh_name): """ Checks if the given mesh requires connectivity. @@ -729,25 +712,6 @@ cdef class Participant: # data access - def has_data (self, data_name, mesh_name): - """ - Checks if the data with given name is used by a solver and mesh. - - Parameters - ---------- - data_name : string - Name of the data. - mesh_name : str - Name of the associated mesh. - - Returns - ------- - tag : bool - True if the mesh is already used. - """ - return self.thisptr.hasData(convert(data_name), convert(mesh_name)) - - def write_data (self, mesh_name, data_name, vertex_ids, values): """ This function writes values of specified vertices to data of a mesh. @@ -1049,7 +1013,7 @@ cdef class Participant: self.thisptr.setMeshAccessRegion(convert(mesh_name), cpp_bounding_box) - def get_mesh_vertices_and_ids (self, mesh_name): + def get_mesh_vertices_and_coordinates (self, mesh_name): """ Iterating over the region of interest defined by bounding boxes and reading the corresponding coordinates omitting the mapping. This function is still experimental. 
@@ -1074,7 +1038,7 @@ cdef class Participant: cdef vector[int] cpp_ids = [-1 for _ in range(size)] cdef vector[double] cpp_coordinates = [-1 for _ in range(size * dimensions)] - self.thisptr.getMeshVerticesAndIDs(convert(mesh_name), cpp_ids, cpp_coordinates) + self.thisptr.getMeshVerticesAndCoordinates(convert(mesh_name), cpp_ids, cpp_coordinates) cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32) cdef np.ndarray[double, ndim=1] np_coordinates = np.array(cpp_coordinates, dtype=np.double) diff --git a/test/Participant.cpp b/test/Participant.cpp index ea9b35dc..41054ee2 100644 --- a/test/Participant.cpp +++ b/test/Participant.cpp @@ -139,13 +139,6 @@ bool Participant:: requiresWritingCheckpoint() return 0; } -bool Participant:: hasMesh -( - precice::string_view meshName ) const -{ - return 0; -} - bool Participant:: requiresMeshConnectivityFor ( precice::string_view meshName) const @@ -161,14 +154,6 @@ bool Participant:: requiresGradientDataFor return 0; } -bool Participant:: hasData -( - precice::string_view meshName, - precice::string_view dataName) const -{ - return 0; -} - int Participant:: setMeshVertex ( precice::string_view meshName, @@ -300,7 +285,7 @@ void Participant:: setMeshAccessRegion } } -void Participant:: getMeshVerticesAndIDs +void Participant:: getMeshVerticesAndCoordinates ( precice::string_view meshName, precice::span valueIndices, diff --git a/test/test_bindings_module.py b/test/test_bindings_module.py index 6d8ae041..75f610a8 100644 --- a/test/test_bindings_module.py +++ b/test/test_bindings_module.py @@ -327,7 +327,7 @@ def test_set_mesh_access_region(self): fake_bounding_box = np.arange(fake_dimension * 2) participant.set_mesh_access_region(fake_mesh_name, fake_bounding_box) - def test_get_mesh_vertices_and_ids(self): + def test_get_mesh_vertices_and_coordinates(self): participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name 
n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices @@ -338,7 +338,7 @@ def test_get_mesh_vertices_and_ids(self): coordinates[i, 0] = i * fake_dimension coordinates[i, 1] = i * fake_dimension + 1 coordinates[i, 2] = i * fake_dimension + 2 - fake_ids, fake_coordinates = participant.get_mesh_vertices_and_ids(fake_mesh_name) + fake_ids, fake_coordinates = participant.get_mesh_vertices_and_coordinates(fake_mesh_name) self.assertTrue(np.array_equal(fake_ids, vertex_ids)) self.assertTrue(np.array_equal(fake_coordinates, coordinates)) From 16f2234a3839314dbc65532183ff9881525f696c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 1 Aug 2023 17:59:34 -0400 Subject: [PATCH 20/33] Use correct function name: getMeshVertexIDsAndCoordinates --- cyprecice/Participant.pxd | 2 +- cyprecice/cyprecice.pyx | 4 ++-- test/Participant.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cyprecice/Participant.pxd b/cyprecice/Participant.pxd index 9cec7594..ffe9783b 100644 --- a/cyprecice/Participant.pxd +++ b/cyprecice/Participant.pxd @@ -75,7 +75,7 @@ cdef extern from "precice/Participant.hpp" namespace "precice": void setMeshAccessRegion (const string& meshName, vector[double] boundingBox) const - void getMeshVerticesAndCoordinates (const string& meshName, vector[int]& ids, vector[double]& coordinates) const + void getMeshVertexIDsAndCoordinates (const string& meshName, vector[int]& ids, vector[double]& coordinates) const # Gradient related API diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index 0551bdd2..d8c08d7e 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -1013,7 +1013,7 @@ cdef class Participant: self.thisptr.setMeshAccessRegion(convert(mesh_name), cpp_bounding_box) - def get_mesh_vertices_and_coordinates (self, mesh_name): + def get_mesh_vertex_ids_and_coordinates (self, mesh_name): """ Iterating over the region of interest defined by bounding boxes and reading the corresponding coordinates 
omitting the mapping. This function is still experimental. @@ -1038,7 +1038,7 @@ cdef class Participant: cdef vector[int] cpp_ids = [-1 for _ in range(size)] cdef vector[double] cpp_coordinates = [-1 for _ in range(size * dimensions)] - self.thisptr.getMeshVerticesAndCoordinates(convert(mesh_name), cpp_ids, cpp_coordinates) + self.thisptr.getMeshVertexIDsAndCoordinates(convert(mesh_name), cpp_ids, cpp_coordinates) cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32) cdef np.ndarray[double, ndim=1] np_coordinates = np.array(cpp_coordinates, dtype=np.double) diff --git a/test/Participant.cpp b/test/Participant.cpp index 41054ee2..8a36f2af 100644 --- a/test/Participant.cpp +++ b/test/Participant.cpp @@ -285,7 +285,7 @@ void Participant:: setMeshAccessRegion } } -void Participant:: getMeshVerticesAndCoordinates +void Participant:: getMeshVertexIDsAndCoordinates ( precice::string_view meshName, precice::span valueIndices, From cac5ba5b347380156c5feb10719a0d617effb7f1 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 1 Aug 2023 18:06:59 -0400 Subject: [PATCH 21/33] Fixing tests --- test/test_bindings_module.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_bindings_module.py b/test/test_bindings_module.py index 75f610a8..a34c3983 100644 --- a/test/test_bindings_module.py +++ b/test/test_bindings_module.py @@ -327,7 +327,7 @@ def test_set_mesh_access_region(self): fake_bounding_box = np.arange(fake_dimension * 2) participant.set_mesh_access_region(fake_mesh_name, fake_bounding_box) - def test_get_mesh_vertices_and_coordinates(self): + def test_get_mesh_vertex_ids_and_coordinates(self): participant = precice.Participant("test", "dummy.xml", 0, 1) fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices @@ -338,7 +338,7 @@ def test_get_mesh_vertices_and_coordinates(self): coordinates[i, 0] = i * fake_dimension 
coordinates[i, 1] = i * fake_dimension + 1 coordinates[i, 2] = i * fake_dimension + 2 - fake_ids, fake_coordinates = participant.get_mesh_vertices_and_coordinates(fake_mesh_name) + fake_ids, fake_coordinates = participant.get_mesh_vertex_ids_and_coordinates(fake_mesh_name) self.assertTrue(np.array_equal(fake_ids, vertex_ids)) self.assertTrue(np.array_equal(fake_coordinates, coordinates)) From d6386ead4d7454e0ce7619a356f16df46c258a0c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 13:46:27 -0400 Subject: [PATCH 22/33] Add tag_prefix = v to versioneer configuration in setup.cfg --- precice/__init__.py | 3 +++ precice/_version.py | 2 +- setup.cfg | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/precice/__init__.py b/precice/__init__.py index 0022eb2f..b5df19cf 100644 --- a/precice/__init__.py +++ b/precice/__init__.py @@ -7,3 +7,6 @@ __version__ = get_versions()['version'] del get_versions + +from . import _version +__version__ = _version.get_versions()['version'] diff --git a/precice/_version.py b/precice/_version.py index 81f74377..13b32400 100644 --- a/precice/_version.py +++ b/precice/_version.py @@ -51,7 +51,7 @@ def get_config() -> VersioneerConfig: cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" - cfg.tag_prefix = "" + cfg.tag_prefix = "v" cfg.parentdir_prefix = "precice-" cfg.versionfile_source = "precice/_version.py" cfg.verbose = False diff --git a/setup.cfg b/setup.cfg index b6c94a76..e115823d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,5 +17,5 @@ VCS = git style = pep440 versionfile_source = precice/_version.py versionfile_build = precice/_version.py -tag_prefix = +tag_prefix = v parentdir_prefix = precice- From 23a840144c2647d6cf09c0ed87be3b768a22feb7 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 13:56:38 -0400 Subject: [PATCH 23/33] Update CMake flags in preCICE source installation commands --- .github/workflows/build-and-test.yml | 2 +- README.md | 2 +- 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 727ff29e..3e6a4f51 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -107,7 +107,7 @@ jobs: cp precice-core/src/precice/Tooling.hpp precice/Tooling.cpp cd precice-core mkdir build && cd build - cmake .. -DPRECICE_MPICommunication=OFF -DPRECICE_PETScMapping=OFF -DPRECICE_PythonActions=OFF -DBUILD_TESTING=OFF + cmake .. -DPRECICE_FEATURE_MPI_COMMUNICATION=OFF -DPRECICE_FEATURE_PETSC_MAPPING=OFF -DPRECICE_FEATURE_PYTHON_ACTIONS=OFF -DBUILD_TESTING=OFF - name: Install dependencies run: | python3 -c 'import toml; c = toml.load("pyproject.toml"); print("\n".join(c["build-system"]["requires"]))' | pip3 install -r /dev/stdin diff --git a/README.md b/README.md index cbe6d362..bbbd5a54 100644 --- a/README.md +++ b/README.md @@ -262,7 +262,7 @@ Note that -DPYTHON_LIBRARY expects a python shared library. You can likely modif ```bash mkdir build && cd build -cmake -DBUILD_SHARED_LIBS=ON -DPRECICE_PETScMapping=OFF -DPRECICE_PythonActions=ON -DCMAKE_INSTALL_PREFIX=/path/to/precice -DCMAKE_BUILD_TYPE=Debug .. -DPYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") -DPYTHON_LIBRARY=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR')+'/libpython2.7.so')") -DNumPy_INCLUDE_DIR=$(python -c "import numpy; print(numpy.get_include())") +cmake -DBUILD_SHARED_LIBS=ON -DPRECICE_FEATURE_PETSC_MAPPING=OFF -DPRECICE_FEATURE_PYTHON_ACTIONS=ON -DCMAKE_INSTALL_PREFIX=/path/to/precice -DCMAKE_BUILD_TYPE=Debug .. 
-DPYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") -DPYTHON_LIBRARY=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR')+'/libpython2.7.so')") -DNumPy_INCLUDE_DIR=$(python -c "import numpy; print(numpy.get_include())") make -j 12 make install ``` From f9da44a56c83e405188d975f81a623e65cad8261 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 14:02:22 -0400 Subject: [PATCH 24/33] Add correct CMake MPI flag in Action --- .github/workflows/run-solverdummy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run-solverdummy.yml b/.github/workflows/run-solverdummy.yml index 4f1d146d..1704a108 100644 --- a/.github/workflows/run-solverdummy.yml +++ b/.github/workflows/run-solverdummy.yml @@ -25,7 +25,7 @@ jobs: - name: Install bindings run: pip3 install --user . - name: Check whether preCICE was built with MPI # reformat version information as a dict and check whether preCICE was compiled with MPI - run: python3 -c "import precice; assert({item.split('=')[0]:item.split('=')[-1] for item in str(precice.get_version_information()).split(';')}['PRECICE_MPICommunication']=='Y')" + run: python3 -c "import precice; assert({item.split('=')[0]:item.split('=')[-1] for item in str(precice.get_version_information()).split(';')}['PRECICE_FEATURE_MPI_COMMUNICATION']=='Y')" - name: Run solverdummies run: | cd examples/solverdummy/ From cf08e63066b4d26b056a4bb0f26d32dd45f44841 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 14:21:50 -0400 Subject: [PATCH 25/33] Update CHANEGLOG and remove pre-release entry from it --- CHANGELOG.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10db1619..9b28ecbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,11 +4,9 @@ All notable changes to this project will be documented in this file. 
## latest -* Remove API functions `has_mesh` and `has_data` and rename `get_mesh_vertices_and_ids` to `get_mesh_vertices_and_coordinates`. - -## 3.0.0.0dev0 - -* Update to API introduced in preCICE v3.0.0. https://github.com/precice/python-bindings/pull/169 https://github.com/precice/python-bindings/pull/174 https://github.com/precice/python-bindings/pull/179 +* Update CMake configuration flags for preCICE source installation in Actions. https://github.com/precice/python-bindings/commit/23a840144c2647d6cf09c0ed87be3b768a22feb7 +* Use `tag_prefix = v` in versioneer configuration in setup.cfg. https://github.com/precice/python-bindings/commit/d6386ead4d7454e0ce7619a356f16df46c258a0c +* Remove API functions `has_mesh` and `has_data` and rename `get_mesh_vertices_and_ids` to `get_mesh_vertices_and_coordinates`. https://github.com/precice/python-bindings/commit/cd446d2807b841d81a4cf5c9dd6656ab43c278c3 ## 2.5.0.3 From c035885e7804f0350a86ce4962a5b07384b6a7c7 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 14:55:37 -0400 Subject: [PATCH 26/33] Use tag_prefix = v in setup.cfg and bump version --- CHANGELOG.md | 4 ++++ setup.cfg | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7db41cac..d09c07f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ All notable changes to this project will be documented in this file. +## 2.5.0.4 + +* Add `tag_prefix = v` in versioneer configuration of `setup.cfg`. + ## 2.5.0.3 * Update from versioneer 0.19 to 0.29. 
diff --git a/setup.cfg b/setup.cfg index b6c94a76..e115823d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,5 +17,5 @@ VCS = git style = pep440 versionfile_source = precice/_version.py versionfile_build = precice/_version.py -tag_prefix = +tag_prefix = v parentdir_prefix = precice- From 6a839a70ea0121d716587d4ea10145442f12e03f Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 8 Aug 2023 16:39:35 -0400 Subject: [PATCH 27/33] Run versioneer and update settings --- precice/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/precice/_version.py b/precice/_version.py index 81f74377..13b32400 100644 --- a/precice/_version.py +++ b/precice/_version.py @@ -51,7 +51,7 @@ def get_config() -> VersioneerConfig: cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" - cfg.tag_prefix = "" + cfg.tag_prefix = "v" cfg.parentdir_prefix = "precice-" cfg.versionfile_source = "precice/_version.py" cfg.verbose = False From 51cf4fb34213347fd67ba03c0a59c6e953db550d Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 9 Aug 2023 08:56:23 -0400 Subject: [PATCH 28/33] post-tag bump From bb55349fc1d46501bc7065b89ba2d45c09b8fb78 Mon Sep 17 00:00:00 2001 From: valentin-seitz Date: Wed, 9 Aug 2023 18:43:09 +0200 Subject: [PATCH 29/33] Adapt Docker workflow according to new system tests design (#186) * - modify args to fit to systemtest - make the manual triggering more useful - move latest tag to last master * bash string handling... 
Fun stuff * sprinkle in some " * adding some "debug" ouptut * Update .github/workflows/build-docker.yml Co-authored-by: Gerasimos Chourdakis * Update .github/workflows/build-docker.yml Co-authored-by: Gerasimos Chourdakis --------- Co-authored-by: Valentin Seitz Co-authored-by: Gerasimos Chourdakis --- .github/workflows/build-docker.yml | 39 +++++++++++++++++---- tools/releasing/packaging/docker/Dockerfile | 9 +++-- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index f512622f..ea12b8cd 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -1,10 +1,19 @@ name: Update docker image on: - workflow_dispatch: # Trigger by hand from the UI + workflow_dispatch: # Trigger by hand from the UI + inputs: + branch: + type: choice + description: branch to build the image from + options: + - develop + - master push: branches: - develop + - master + jobs: build-and-release-docker-image: @@ -13,10 +22,26 @@ jobs: env: docker_username: precice steps: - - name: Get branch name - if: github.event_name != 'pull_request' + - name: Set branch name for manual triggering + if: github.event_name == 'workflow_dispatch' + shell: bash + run: echo "BINDINGS_REF=${{ inputs.branch }}" >> $GITHUB_ENV + - name: Set branch name for "on pull request" triggering + if: github.event_name != 'pull_request' && github.event_name != 'workflow_dispatch' + shell: bash + run: echo "BINDINGS_REF=${{ github.ref_name }}" >> $GITHUB_ENV + - name: Set PRECICE_TAG and the TAG depending on branch shell: bash - run: echo "branch=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV + run: | + if [[ '${{ env.BINDINGS_REF }}' == 'master' ]]; then + echo "PRECICE_TAG=latest" >> "$GITHUB_ENV" + echo "TAG=latest" >> "$GITHUB_ENV" + echo "Building TAG: latest" + else + echo "PRECICE_TAG=${{ env.BINDINGS_REF }}" >> "$GITHUB_ENV" + echo "TAG=${{ env.BINDINGS_REF }}" >> "$GITHUB_ENV" + echo 
"Building TAG: ${{ env.BINDINGS_REF }}" + fi - name: Checkout Repository uses: actions/checkout@v2 - name: Set up Docker Buildx @@ -31,7 +56,7 @@ jobs: with: push: true file: "./tools/releasing/packaging/docker/Dockerfile" - tags: ${{ env.docker_username }}/python-bindings:${{ env.branch }},${{ env.docker_username }}/python-bindings:latest + tags: ${{ env.docker_username }}/python-bindings:${{ env.TAG }} build-args: | - branch=${{ env.branch }} - from=precice/precice:develop + PRECICE_TAG=${{ env.PRECICE_TAG }} + PYTHON_BINDINGS_REF=${{ env.BINDINGS_REF }} diff --git a/tools/releasing/packaging/docker/Dockerfile b/tools/releasing/packaging/docker/Dockerfile index 2074f1bb..6eb6254c 100644 --- a/tools/releasing/packaging/docker/Dockerfile +++ b/tools/releasing/packaging/docker/Dockerfile @@ -1,6 +1,6 @@ # Dockerfile to build a ubuntu image containing the installed Debian package of a release -ARG branch=develop -ARG from=precice/precice:latest +ARG PRECICE_TAG=develop +ARG from=precice/precice:${PRECICE_TAG} FROM $from USER root @@ -42,8 +42,7 @@ SHELL ["/bin/bash", "-c"] RUN python3 -m pip install --user --upgrade pip # Rebuild image if force_rebuild after that command -ARG CACHEBUST -ARG branch=develop +ARG PYTHON_BINDINGS_REF=develop # Builds the precice python bindings for python3 -RUN pip3 install --user git+https://github.com/precice/python-bindings.git@$branch +RUN pip3 install --user git+https://github.com/precice/python-bindings.git@${PYTHON_BINDINGS_REF} From 5f800b8882fb558d49ade6b476505ea3705782c4 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 25 Aug 2023 14:26:53 -0400 Subject: [PATCH 30/33] Remove experimental warning for direct mesh access functions --- cyprecice/cyprecice.pyx | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cyprecice/cyprecice.pyx b/cyprecice/cyprecice.pyx index d8c08d7e..a2897186 100644 --- a/cyprecice/cyprecice.pyx +++ b/cyprecice/cyprecice.pyx @@ -997,8 +997,6 @@ cdef class Participant: 0.5, i.e. 
the defined access region as computed through the involved provided mesh is by 50% enlarged. """ - warnings.warn("The function set_mesh_access_region is still experimental.") - check_array_like(bounding_box, "bounding_box", "set_mesh_access_region") if not isinstance(bounding_box, np.ndarray): @@ -1030,8 +1028,6 @@ cdef class Participant: coordinates : numpy.ndarray he coordinates associated to the IDs and corresponding data values (dim * size) """ - warnings.warn("The function get_mesh_vertices_and_ids is still experimental.") - size = self.get_mesh_vertex_size(mesh_name) dimensions = self.get_mesh_dimensions(mesh_name) From f9498ccd087083c8047782bf55facb2794052713 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 25 Aug 2023 14:33:54 -0400 Subject: [PATCH 31/33] Move dimensions attribute from precice-configuration to mesh --- examples/solverdummy/precice-config.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/solverdummy/precice-config.xml b/examples/solverdummy/precice-config.xml index 6051626a..175b22f3 100644 --- a/examples/solverdummy/precice-config.xml +++ b/examples/solverdummy/precice-config.xml @@ -1,5 +1,5 @@ - + - + - + From e03ba33fbb442c82a08321487234f076053a92cb Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Sat, 13 Jan 2024 10:41:04 +0100 Subject: [PATCH 32/33] Remove unnecessary statement Additionally, assuming subcycling is used, I would even consider calling get_max_time_step_size directly before advance an anti-pattern. 
--- examples/solverdummy/solverdummy.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/solverdummy/solverdummy.py b/examples/solverdummy/solverdummy.py index c2ac95b2..7143617d 100644 --- a/examples/solverdummy/solverdummy.py +++ b/examples/solverdummy/solverdummy.py @@ -69,7 +69,6 @@ participant.write_data(mesh_name, write_data_name, vertex_ids, write_data) print("DUMMY: Advancing in time") - dt = participant.get_max_time_step_size() participant.advance(dt) if participant.requires_reading_checkpoint(): From 1c25e1324af72bf50deb222fd7e33d2a49e4725c Mon Sep 17 00:00:00 2001 From: Benjamin Rodenberg Date: Mon, 15 Jan 2024 12:12:17 +0100 Subject: [PATCH 33/33] Update configuration file w.r.t min-iterations. --- examples/solverdummy/precice-config.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/solverdummy/precice-config.xml b/examples/solverdummy/precice-config.xml index 175b22f3..28fd5246 100644 --- a/examples/solverdummy/precice-config.xml +++ b/examples/solverdummy/precice-config.xml @@ -51,8 +51,8 @@ + -