diff --git a/.github/workflows/ci-cd.yaml b/.github/workflows/ci-cd.yaml index cf4ec37..12380ac 100644 --- a/.github/workflows/ci-cd.yaml +++ b/.github/workflows/ci-cd.yaml @@ -16,54 +16,55 @@ on: types: [create-release] jobs: - nipype-conv: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Revert version to most recent tag on upstream update if: github.event_name == 'repository_dispatch' run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') + - name: Show file tree + run: tree . - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Install build dependencies run: python -m pip install --upgrade pip - name: Install requirements - run: python -m pip install ./related-packages/fileformats -r ./nipype-auto-conv/requirements.txt + run: python -m pip install -r ./nipype-auto-conv/requirements.txt - name: Run automatic Nipype > Pydra conversion run: ./nipype-auto-conv/generate - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: converted-nipype - path: pydra/tasks/anatomical/auto + path: pydra/tasks/mriqc devcheck: needs: [nipype-conv] runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.8', '3.11'] # Check oldest and newest versions + python-version: ['3.10', '3.12'] # Check oldest and newest versions pip-flags: ['', '--editable'] pydra: - 'pydra' - '--editable git+https://github.com/nipype/pydra.git#egg=pydra' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Revert version to most recent tag on upstream update if: github.event_name == 'repository_dispatch' run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - name: Download tasks converted from Nipype - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: converted-nipype - path: pydra/tasks/anatomical/auto + path: pydra/tasks/mriqc - name: 
Strip auto package from gitignore so it is included in package run: | - sed -i '/\/pydra\/tasks\/anatomical\/auto/d' .gitignore + sed -i '/\/pydra/d' .gitignore - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install build dependencies @@ -77,207 +78,104 @@ jobs: python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - name: Install task package run: | - pip install "./related-packages/fileformats[dev]" "related-packages/fileformats-extras[dev]" pip install ${{ matrix.pip-flags }} ".[dev]" - python -c "import pydra.tasks.anatomical as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - python -c "import fileformats.medimage_anatomical as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - python -c "import fileformats.extras.medimage_anatomical as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - - fileformats-test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ['3.8', '3.11'] - steps: - - uses: actions/checkout@v3 - - name: Revert version to most recent tag on upstream update - if: github.event_name == 'repository_dispatch' - run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install build dependencies - run: | - python -m pip install --upgrade pip - - name: Install task package - run: | - pip install "./related-packages/fileformats[test]" "./related-packages/fileformats-extras[test]" - python -c "import fileformats.medimage_anatomical as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - - name: Test fileformats with pytest - run: | - cd ./fileformats - pytest -sv --cov fileformats.medimage_anatomical --cov 
fileformats.extras.medimage_anatomical --cov-report xml . + test: - needs: [nipype-conv, fileformats-test] + needs: [nipype-conv] runs-on: ubuntu-22.04 strategy: matrix: - python-version: ['3.8'] # '3.11' + python-version: ['3.10', '3.12'] + steps: + - name: Removed unnecessary tools to free space run: | sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - - name: Get Download cache Key - id: cache-key - run: echo "::set-output name=key::anatomical-linux-ubuntu22_amd64-7.4.1" - - name: Cache FreeSurfer - uses: actions/cache@v2 - with: - path: $HOME/downloads/anatomical - key: ${{ steps.cache-key.outputs.key }} - restore-keys: | - anatomical-linux-ubuntu22_amd64-7.4.1 - - name: Download FreeSurfer - if: steps.cache-key.outputs.key != steps.cache-hit.outputs.key - run: | - mkdir -p $HOME/downloads/anatomical - curl -s -o $HOME/downloads/anatomical/anatomical-linux-ubuntu22_amd64-7.4.1.tar.gz https://surfer.nmr.mgh.harvard.edu/pub/dist/anatomical/7.4.1/anatomical-linux-ubuntu22_amd64-7.4.1.tar.gz - shell: bash - - name: Install Freesurfer - env: - FREESURFER_LICENCE: ${{ secrets.FREESURFER_LICENCE }} - run: | - pushd $HOME/downloads/anatomical - tar -zxpf anatomical-linux-ubuntu22_amd64-7.4.1.tar.gz - mv anatomical $HOME/ - popd - export FREESURFER_HOME=$HOME/anatomical - source $FREESURFER_HOME/SetUpFreeSurfer.sh - echo $FREESURFER_LICENCE > $FREESURFER_HOME/license.txt - export PATH=$FREESURFER_HOME/bin:$PATH - - uses: actions/checkout@v3 + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + + - uses: actions/checkout@v4 - name: Revert version to most recent tag on upstream update if: github.event_name == 'repository_dispatch' run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') + - name: Download tasks converted from Nipype - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: converted-nipype - path: pydra/tasks/anatomical/auto - - name: Strip auto package from gitignore so it is included in package + path: 
pydra/tasks/mriqc + + - name: Strip pydra package from gitignore so it is included in package run: | - sed -i '/\/src\/pydra\/tasks\/anatomical\/auto/d' .gitignore + sed -i '/\/pydra/d' .gitignore + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + - name: Install build dependencies run: | python -m pip install --upgrade pip + - name: Install task package run: | - pip install "./related-packages/fileformats" "./related-packages/fileformats-extras" ".[test]" - python -c "import pydra.tasks.anatomical as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + pip install ".[test]" + python -c "import pydra.tasks.mriqc as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + + - name: Install dev Pydra version for now until it is merged + run: pip install --upgrade git+https://github.com/nipype/pydra.git@typing-bugfixes + + - name: Set environment variables required for mocking tools + run: | + echo "$(pwd)/mock-tools" >> $GITHUB_PATH + echo "FSLDIR=$(pwd)/mock-tools" >> $GITHUB_ENV + echo "FSLOUTPUTTYPE=NIFTI_GZ" >> $GITHUB_ENV + + - name: Test with pytest run: | - pytest -sv --doctest-modules ./pydra/tasks/anatomical \ - --cov pydra.tasks.anatomical --cov-report xml + pytest -sv ./pydra --cov pydra.tasks.mriqc --cov-report xml + + - uses: codecov/codecov-action@v3 if: ${{ always() }} with: - files: coverage.xml,./fileformats/coverage.xml - name: pydra-anatomical - - deploy-fileformats: - needs: [devcheck, test] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install build tools - run: python -m pip install build twine - - name: Build source and wheel distributions - run: python -m build 
./related-packages/fileformats - - name: Check distributions - run: twine check ./related-packages/fileformats/dist/* - - name: Check for PyPI token on tag - id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' - env: - PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_API_TOKEN }}" - run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi - - name: Upload to PyPI - if: steps.deployable.outputs.DEPLOY - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_FILEFORMATS_API_TOKEN }} - packages-dir: ./related-packages/fileformats/dist - - deploy-fileformats-extras: - needs: [deploy-fileformats] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install build tools - run: python -m pip install build twine - - name: Build source and wheel distributions - run: python -m build ./related-packages/fileformats-extras - - name: Check distributions - run: twine check ./related-packages/fileformats-extras/dist/* - - name: Check for PyPI token on tag - id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' - env: - PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }}" - run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi - - name: Upload to PyPI - if: steps.deployable.outputs.DEPLOY - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }} - packages-dir: ./related-packages/fileformats-extras/dist + files: coverage.xml + name: pydra-mriqc deploy: - needs: [deploy-fileformats-extras] + needs: [test] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: 
actions/checkout@v4 with: submodules: recursive fetch-depth: 0 - name: Download tasks converted from Nipype - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: converted-nipype - path: pydra/tasks/anatomical/auto + path: pydra/tasks/mriqc - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions if: github.event_name == 'repository_dispatch' run: | TAG=$(git tag -l | tail -n 1 | awk -F post '{print $1}') - POST=$(python -c "from pydra.tasks.anatomical.auto._version import *; print(post_release)") + POST=$(python -c "from pydra.tasks.mriqc._post_release import *; print(post_release)") git checkout $TAG - git add -f pydra/tasks/anatomical/auto/_version.py + git add -f pydra/tasks/mriqc/_version.py git commit -am"added auto-generated version to make new tag for package version" git tag ${TAG}post${POST} - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: '3.11' + python-version: '3.12' - name: Install build tools run: python -m pip install build twine - - name: Strip auto package from gitignore so it is included in package + - name: Strip pydra package from gitignore so it is included in package run: | - sed -i '/\/pydra\/tasks\/anatomical\/auto/d' .gitignore + sed -i '/\/pydra/d' .gitignore - name: Build source and wheel distributions run: python -m build . - name: Check distributions @@ -288,7 +186,7 @@ jobs: path: dist/ - name: Check for PyPI token on tag id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' + if: github.event_name == 'release' env: PYPI_API_TOKEN: "${{ secrets.PYPI_API_TOKEN }}" run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi @@ -302,4 +200,4 @@ jobs: # Deploy on tags if PYPI_API_TOKEN is defined in the repository secrets. 
# Secrets are not accessible in the if: condition [0], so set an output variable [1] # [0] https://github.community/t/16928 -# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter \ No newline at end of file +# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter diff --git a/.gitignore b/.gitignore index 5b8f157..970d75d 100644 --- a/.gitignore +++ b/.gitignore @@ -137,6 +137,4 @@ dmypy.json # Mac garbarge .DS_store -/pydra/tasks/mriqc/interfaces -/pydra/tasks/mriqc/_version.py -/pydra/tasks/mriqc/auto \ No newline at end of file +/pydra diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..180b937 --- /dev/null +++ b/AUTHORS @@ -0,0 +1 @@ +# Enter list of names and emails of contributors to this package \ No newline at end of file diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..fd1b4f4 --- /dev/null +++ b/NOTICE @@ -0,0 +1,6 @@ +Pydra-mriqc +Copyright 2024 Pydra Development Team + +The bases for the task interfaces defined in this package were semi-automatically converted +from Nipype interfaces (https://github.com/nipy/nipype) using the Nipype2Pydra tool +(https://github.com/nipype/nipype2pydra). diff --git a/README.md b/README.md deleted file mode 100644 index 7f28f39..0000000 --- a/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# pydra-mriqc -pydra version of nipreps/mriqc diff --git a/README.rst b/README.rst index bd41ece..685dd2a 100644 --- a/README.rst +++ b/README.rst @@ -1,20 +1,20 @@ -=============================== -Pydra task package for anatomical -=============================== - -.. image:: https://github.com/nipype/pydra-anatomical/actions/workflows/pythonpackage.yaml/badge.svg - :target: https://github.com/nipype/pydra-anatomical/actions/workflows/pythonpackage.yaml -.. .. image:: https://codecov.io/gh/nipype/pydra-anatomical/branch/main/graph/badge.svg?token=UIS0OGPST7 -.. 
:target: https://codecov.io/gh/nipype/pydra-anatomical -.. image:: https://img.shields.io/pypi/pyversions/pydra-anatomical.svg - :target: https://pypi.python.org/pypi/pydra-anatomical/ +============================ +Pydra task package for mriqc +============================ + +.. image:: https://github.com/nipype/pydra-mriqc/actions/workflows/ci-cd.yaml/badge.svg + :target: https://github.com/nipype/pydra-mriqc/actions/workflows/ci-cd.yaml +.. image:: https://codecov.io/gh/nipype/pydra-mriqc/branch/main/graph/badge.svg?token=UIS0OGPST7 + :target: https://codecov.io/gh/nipype/pydra-mriqc +.. image:: https://img.shields.io/pypi/pyversions/pydra-mriqc.svg + :target: https://pypi.python.org/pypi/pydra-mriqc/ :alt: Supported Python versions -.. image:: https://img.shields.io/pypi/v/pydra-anatomical.svg - :target: https://pypi.python.org/pypi/pydra-anatomical/ +.. image:: https://img.shields.io/pypi/v/pydra-mriqc.svg + :target: https://pypi.python.org/pypi/pydra-mriqc/ :alt: Latest Version -This package contains a collection of Pydra task interfaces for the anatomical toolkit. +This package contains a collection of Pydra task interfaces for the mriqc toolkit. The basis of this collection has been formed by the semi-automatic conversion of existing `Nipype `__ interfaces to Pydra using the `Nipype2Pydra `__ tool @@ -23,10 +23,10 @@ existing `Nipype `__ interfaces to Pydra using t Automatically-generated vs manually-curated tasks ------------------------------------------------- -Automatically generated tasks can be found in the `pydra.tasks.anatomical.auto` package. +Automatically generated tasks can be found in the `pydra.tasks.mriqc.auto` package. These packages should be treated with extreme caution as they likely do not pass testing. 
Generated tasks that have been edited and pass testing are imported into one or more of the -`pydra.tasks.anatomical.v*` packages, corresponding to the version of the anatomical toolkit +`pydra.tasks.mriqc.v*` packages, corresponding to the version of the mriqc toolkit they are designed for. Tests @@ -71,6 +71,14 @@ Contributing to this package Developer installation ~~~~~~~~~~~~~~~~~~~~~~ +Install the `fileformats `__ packages +corresponding to mriqc specific file formats + + +.. code-block:: + + $ pip install -e ./related-packages/fileformats[dev] + $ pip install -e ./related-packages/fileformats-extras[dev] Install repo in developer mode from the source directory and install pre-commit to ensure consistent code-style and quality. @@ -78,7 +86,7 @@ ensure consistent code-style and quality. .. code-block:: $ pip install -e .[test,dev] -$ pre-commit install + $ pre-commit install Next install the requirements for running the auto-conversion script and generate the Pydra task interfaces from their Nipype counterparts @@ -93,7 +101,8 @@ The run the conversion script to convert Nipype interfaces to Pydra $ nipype-auto-conv/generate -## Methodology +Methodology +~~~~~~~~~~~ The development of this package is expected to have two phases @@ -111,7 +120,7 @@ The auto-converted Pydra tasks are generated from their corresponding Nipype interfaces in combination with "conversion hints" contained in YAML specs located in `nipype-auto-conv/specs/`. The self-documented conversion specs are to be edited by hand in order to assist the auto-converter produce valid pydra tasks. -After editing one or more conversion specs the `pydra.tasks.anatomical.auto` package should +After editing one or more conversion specs the `pydra.tasks.mriqc.auto` package should be regenerated by running .. code-block:: @@ -122,15 +131,15 @@ The tests should be run on the auto-generated tasks to see if they are valid .. 
code-block:: - $ pytest --doctest-modules pydra/tasks/anatomical/auto/tests/test_.py + $ pytest --doctest-modules pydra/tasks/mriqc/auto/tests/test_.py -If the test passes you should then edit the `pydra/tasks/anatomical/v/__init__.py` file +If the test passes you should then edit the `pydra/tasks/mriqc/v/__init__.py` file to import the now valid task interface to signify that it has been validated and is ready for use, e.g. .. code-block::python - from pydra.tasks.anatomical.auto import + from pydra.tasks.mriqc.auto import Typing and sample test data diff --git a/nipype-auto-conv/specs/upload_iq_ms.yaml b/datalad_identity_interface.yaml similarity index 52% rename from nipype-auto-conv/specs/upload_iq_ms.yaml rename to datalad_identity_interface.yaml index 54775fd..306a3dd 100644 --- a/nipype-auto-conv/specs/upload_iq_ms.yaml +++ b/datalad_identity_interface.yaml @@ -1,16 +1,14 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.webapi.UploadIQMs' from Nipype to Pydra. +# 'mriqc.interfaces.datalad.DataladIdentityInterface' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- -# -# Upload features to MRIQCWebAPI -# -task_name: UploadIQMs -nipype_name: UploadIQMs -nipype_module: mriqc.interfaces.webapi +# Sneaks a ``datalad get`` in paths, if datalad is available. +task_name: DataladIdentityInterface +nipype_name: DataladIdentityInterface +nipype_module: mriqc.interfaces.datalad inputs: omit: # list[str] - fields to omit from the Pydra interface @@ -22,8 +20,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_iqms: generic/file - # type=file|default=: the input IQMs-JSON file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -45,32 +44,22 @@ outputs: requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: -- inputs: - # dict[str, str] - values to provide to inputs fields in the task initialisation - # (if not specified, will try to choose a sensible value) - in_iqms: - # type=file|default=: the input IQMs-JSON file - endpoint: - # type=str|default='': URL of the POST endpoint - auth_token: - # type=str|default='': authentication token - email: - # type=str|default='': set sender email - strict: - # type=bool|default=False: crash if upload was not successful - imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item - # consisting of 'module', 'name', and optionally 'alias' keys - expected_outputs: - # dict[str, str] - expected values for selected outputs, noting that tests will typically - # be terminated before they complete for time-saving reasons, and therefore - # these values will be ignored, when running in CI - timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised - # successfully. Set to 0 to disable the timeout (warning, this could - # lead to the unittests taking a very long time to complete) - xfail: true - # bool - whether the unittest is expected to fail or not. 
Set to false - # when you are satisfied with the edits you have made to this file + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file doctests: [] diff --git a/datalad_identity_interface_callables.py b/datalad_identity_interface_callables.py new file mode 100644 index 0000000..d5225ae --- /dev/null +++ b/datalad_identity_interface_callables.py @@ -0,0 +1,6 @@ +"""Module to put any functions that are referred to in the "callables" section of DataladIdentityInterface.yaml""" + + +# Original source at L139 of /interfaces/datalad.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError diff --git a/docs/conf.py b/docs/conf.py index af3cb1d..03a8f65 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -17,7 +17,7 @@ # -- Project information ----------------------------------------------------- -project = "pydra-anatomical" +project = "pydra-mriqc" copyright = "2020, Xihe Xie" author = "Xihe Xie" diff --git a/docs/index.rst b/docs/index.rst index 86dda89..bd0a60a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,4 +1,4 @@ -Welcome to pydra-anatomical's documentation! +Welcome to pydra-mriqc's documentation! ========================================= .. 
toctree:: diff --git a/mock-tools/afni b/mock-tools/afni new file mode 100755 index 0000000..695aac5 --- /dev/null +++ b/mock-tools/afni @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +echo "Version AFNI_24_1_0" +echo "Dummy line" diff --git a/mock-tools/antsRegistration b/mock-tools/antsRegistration new file mode 100755 index 0000000..bca3fb9 --- /dev/null +++ b/mock-tools/antsRegistration @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +echo "ANTs Version: 2.5.1" \ No newline at end of file diff --git a/mock-tools/etc/fslversion b/mock-tools/etc/fslversion new file mode 100644 index 0000000..41bd15e --- /dev/null +++ b/mock-tools/etc/fslversion @@ -0,0 +1 @@ +6.0.7 \ No newline at end of file diff --git a/mock-tools/mrconvert b/mock-tools/mrconvert new file mode 100755 index 0000000..4ceec4f --- /dev/null +++ b/mock-tools/mrconvert @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +echo "== mrconvert 3.0.3" diff --git a/nipype-auto-conv/generate b/nipype-auto-conv/generate index d9f0a06..3e72ae0 100755 --- a/nipype-auto-conv/generate +++ b/nipype-auto-conv/generate @@ -1,73 +1,3 @@ -#!/usr/bin/env python3 -import sys -import os.path -from warnings import warn -from pathlib import Path -import shutil -from importlib import import_module -import yaml -import nipype -import nipype2pydra.utils -from nipype2pydra.task import get_converter - - -SPECS_DIR = Path(__file__).parent / "specs" -PKG_ROOT = Path(__file__).parent.parent -PKG_NAME = "mriqc" - -if ".dev" in nipype.__version__: - raise RuntimeError( - f"Cannot use a development version of Nipype {nipype.__version__}" - ) - -if ".dev" in nipype2pydra.__version__: - warn( - f"using development version of nipype2pydra ({nipype2pydra.__version__}), " - f"development component will be dropped in {PKG_NAME} package version" - ) - -# Insert specs dir into path so we can load callables modules -sys.path.insert(0, str(SPECS_DIR)) - -interfaces_init = f"# Auto-generated by {__file__}, do not edit as it will be overwritten\n\n" - -interfaces_dir = PKG_ROOT / 
"pydra" / "tasks" / PKG_NAME / "interfaces" -if interfaces_dir.exists(): - shutil.rmtree(interfaces_dir) - -for fspath in sorted(SPECS_DIR.glob("**/*.yaml")): - with open(fspath) as f: - spec = yaml.load(f, Loader=yaml.SafeLoader) - print(f"processing {fspath}") - - rel_pkg_path = str(fspath.parent.relative_to(SPECS_DIR)).replace(os.path.sep, ".") - if rel_pkg_path == ".": - rel_pkg_path = fspath.stem - else: - rel_pkg_path += "." + fspath.stem - - callables = import_module(rel_pkg_path + "_callables") - - module_name = nipype2pydra.utils.to_snake_case(spec["task_name"]) - - converter = get_converter( - output_module=f"pydra.tasks.{PKG_NAME}.interfaces.{module_name}", - callables_module=callables, # type: ignore - **spec, - ) - converter.generate(PKG_ROOT) - interfaces_init += f"from .{module_name} import {converter.task_name}\n" - - -with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "interfaces" / "_version.py", "w") as f: - f.write( - f"""# Auto-generated by {__file__}, do not edit as it will be overwritten - -nipype_version = "{nipype.__version__.split('.dev')[0]}" -nipype2pydra_version = "{nipype2pydra.__version__.split('.dev')[0]}" -post_release = (nipype_version + nipype2pydra_version).replace(".", "") -""" - ) - -with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "interfaces" / "__init__.py", "w") as f: - f.write(interfaces_init) +#!/usr/bin/env bash +conv_dir=$(dirname $0) +nipype2pydra convert $conv_dir/specs $conv_dir/.. 
diff --git a/nipype-auto-conv/requirements.txt b/nipype-auto-conv/requirements.txt index 71aa364..85ec865 100644 --- a/nipype-auto-conv/requirements.txt +++ b/nipype-auto-conv/requirements.txt @@ -1,11 +1,6 @@ -black -attrs>=22.1.0 -nipype -pydra -PyYAML>=6.0 -fileformats >=0.8 -fileformats-medimage >=0.4 -fileformats-datascience >= 0.1 -fileformats-medimage-anatomical -traits -nipype2pydra \ No newline at end of file +nipype2pydra +mriqc +nipy +datalad +nitime +nirodents diff --git a/nipype-auto-conv/specs/add_provenance_callables.py b/nipype-auto-conv/specs/add_provenance_callables.py deleted file mode 100644 index 1681d92..0000000 --- a/nipype-auto-conv/specs/add_provenance_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in AddProvenance.yaml""" diff --git a/nipype-auto-conv/specs/artifact_mask_callables.py b/nipype-auto-conv/specs/artifact_mask_callables.py deleted file mode 100644 index 38e48cc..0000000 --- a/nipype-auto-conv/specs/artifact_mask_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in ArtifactMask.yaml""" diff --git a/nipype-auto-conv/specs/compute_qi2_callables.py b/nipype-auto-conv/specs/compute_qi2_callables.py deleted file mode 100644 index 30791b6..0000000 --- a/nipype-auto-conv/specs/compute_qi2_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in ComputeQI2.yaml""" diff --git a/nipype-auto-conv/specs/conform_image_callables.py b/nipype-auto-conv/specs/conform_image_callables.py deleted file mode 100644 index 19d369a..0000000 --- a/nipype-auto-conv/specs/conform_image_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in ConformImage.yaml""" diff --git a/nipype-auto-conv/specs/correct_signal_drift_callables.py b/nipype-auto-conv/specs/correct_signal_drift_callables.py deleted file mode 100644 index 3f8cb9e..0000000 --- 
a/nipype-auto-conv/specs/correct_signal_drift_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in CorrectSignalDrift.yaml""" diff --git a/nipype-auto-conv/specs/dipy_dti_callables.py b/nipype-auto-conv/specs/dipy_dti_callables.py deleted file mode 100644 index 0682bcc..0000000 --- a/nipype-auto-conv/specs/dipy_dti_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in DipyDTI.yaml""" diff --git a/nipype-auto-conv/specs/ensure_size_callables.py b/nipype-auto-conv/specs/ensure_size_callables.py deleted file mode 100644 index 0289beb..0000000 --- a/nipype-auto-conv/specs/ensure_size_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in EnsureSize.yaml""" diff --git a/nipype-auto-conv/specs/extract_b0_callables.py b/nipype-auto-conv/specs/extract_b0_callables.py deleted file mode 100644 index b07d039..0000000 --- a/nipype-auto-conv/specs/extract_b0_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in ExtractB0.yaml""" diff --git a/nipype-auto-conv/specs/filter_shells_callables.py b/nipype-auto-conv/specs/filter_shells_callables.py deleted file mode 100644 index c567417..0000000 --- a/nipype-auto-conv/specs/filter_shells_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in FilterShells.yaml""" diff --git a/nipype-auto-conv/specs/functional_qc_callables.py b/nipype-auto-conv/specs/functional_qc_callables.py deleted file mode 100644 index e991c37..0000000 --- a/nipype-auto-conv/specs/functional_qc_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in FunctionalQC.yaml""" diff --git a/nipype-auto-conv/specs/gather_timeseries_callables.py b/nipype-auto-conv/specs/gather_timeseries_callables.py deleted file mode 100644 index d5f2a6e..0000000 --- a/nipype-auto-conv/specs/gather_timeseries_callables.py +++ /dev/null @@ 
-1 +0,0 @@ -"""Module to put any functions that are referred to in GatherTimeseries.yaml""" diff --git a/nipype-auto-conv/specs/gcor_callables.py b/nipype-auto-conv/specs/gcor_callables.py deleted file mode 100644 index d268a02..0000000 --- a/nipype-auto-conv/specs/gcor_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in GCOR.yaml""" diff --git a/nipype-auto-conv/specs/harmonize_callables.py b/nipype-auto-conv/specs/harmonize_callables.py deleted file mode 100644 index 9904b86..0000000 --- a/nipype-auto-conv/specs/harmonize_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in Harmonize.yaml""" diff --git a/nipype-auto-conv/specs/interfaces/add_provenance.yaml b/nipype-auto-conv/specs/interfaces/add_provenance.yaml new file mode 100644 index 0000000..dc890e2 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/add_provenance.yaml @@ -0,0 +1,85 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.reports.AddProvenance' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Builds a provenance dictionary. +task_name: AddProvenance +nipype_name: AddProvenance +nipype_module: mriqc.interfaces.reports +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ air_msk: generic/file + # type=file|default=: air mask file + in_file: generic/file + # type=file|default=: input file + rot_msk: generic/file + # type=file|default=: rotation mask file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + out_prov: out_prov_callable + # type=dict: + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: input file + air_msk: + # type=file|default=: air mask file + rot_msk: + # type=file|default=: rotation mask file + modality: + # type=str|default='': provenance type + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - ["config.environment.version", "''"] + - ["config.execution.debug", "False"] + - ["config.workflow.fd_thres,", "0.2, # .fd_thres"] diff --git a/nipype-auto-conv/specs/interfaces/add_provenance_callables.py b/nipype-auto-conv/specs/interfaces/add_provenance_callables.py new file mode 100644 index 0000000..50b54c8 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/add_provenance_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of AddProvenance.yaml""" + + +def out_prov_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_prov"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/artifact_mask.yaml b/nipype-auto-conv/specs/interfaces/artifact_mask.yaml similarity index 93% rename from nipype-auto-conv/specs/artifact_mask.yaml rename to nipype-auto-conv/specs/interfaces/artifact_mask.yaml index 7fbe87d..9fa1f22 100644 --- a/nipype-auto-conv/specs/artifact_mask.yaml +++ b/nipype-auto-conv/specs/interfaces/artifact_mask.yaml @@ -22,12 +22,15 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: generic/file - # type=file|default=: File to be plotted head_mask: generic/file # type=file|default=: head mask + in_file: generic/file + # type=file|default=: File to be plotted ind2std_xfm: generic/file # type=file|default=: individual to standard affine transform + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -41,12 +44,12 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_hat_msk: generic/file - # type=file: output "hat" mask - out_art_msk: generic/file - # type=file: output artifacts mask out_air_msk: generic/file # type=file: output "hat" mask, without artifacts + out_art_msk: generic/file + # type=file: output artifacts mask + out_hat_msk: generic/file + # type=file: output "hat" mask callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -71,7 +74,7 @@ tests: zscore: # type=float|default=10.0: z-score to consider artifacts imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/artifact_mask_callables.py b/nipype-auto-conv/specs/interfaces/artifact_mask_callables.py new file mode 100644 index 
0000000..3f3378f --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/artifact_mask_callables.py @@ -0,0 +1,27 @@ +"""Module to put any functions that are referred to in the "callables" section of ArtifactMask.yaml""" + + +def out_air_msk_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_air_msk"] + + +def out_art_msk_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_art_msk"] + + +def out_hat_msk_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_hat_msk"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/cc_segmentation.yaml b/nipype-auto-conv/specs/interfaces/cc_segmentation.yaml new file mode 100644 index 0000000..c29c245 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/cc_segmentation.yaml @@ -0,0 +1,87 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.diffusion.CCSegmentation' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Computes :abbr:`QC (Quality Control)` measures on the input DWI EPI scan. +task_name: CCSegmentation +nipype_name: CCSegmentation +nipype_module: mriqc.interfaces.diffusion +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_cfa: generic/file + # type=file|default=: color FA file + in_fa: generic/file + # type=file|default=: fractional anisotropy (FA) file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ out_mask: generic/file + # type=file: output mask of the corpus callosum + wm_finalmask: generic/file + # type=file: output mask of the white-matter after binary opening + wm_mask: generic/file + # type=file: output mask of the white-matter (thresholded) + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_fa: + # type=file|default=: fractional anisotropy (FA) file + in_cfa: + # type=file|default=: color FA file + min_rgb: + # type=tuple|default=(0.4, 0.008, 0.008): minimum RGB within the CC + max_rgb: + # type=tuple|default=(1.1, 0.25, 0.25): maximum RGB within the CC + wm_threshold: + # type=float|default=0.35: WM segmentation threshold + clean_mask: + # type=bool|default=False: run a final cleanup step on mask + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] diff --git a/nipype-auto-conv/specs/interfaces/cc_segmentation_callables.py b/nipype-auto-conv/specs/interfaces/cc_segmentation_callables.py new file mode 100644 index 0000000..973767b --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/cc_segmentation_callables.py @@ -0,0 +1,27 @@ +"""Module to put any functions that are referred to in the "callables" section of CCSegmentation.yaml""" + + +def out_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_mask"] + + +def wm_finalmask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["wm_finalmask"] + + +def wm_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["wm_mask"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/compute_qi2.yaml b/nipype-auto-conv/specs/interfaces/compute_qi2.yaml similarity index 90% rename from nipype-auto-conv/specs/compute_qi2.yaml rename to nipype-auto-conv/specs/interfaces/compute_qi2.yaml index ac11ad2..2606c71 100644 --- a/nipype-auto-conv/specs/compute_qi2.yaml +++ b/nipype-auto-conv/specs/interfaces/compute_qi2.yaml @@ -22,10 +22,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the 
field in the automatically generated unittests. - in_file: generic/file - # type=file|default=: File to be plotted air_msk: generic/file # type=file|default=: air (without artifacts) mask + in_file: generic/file + # type=file|default=: File to be plotted + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -44,6 +47,8 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + qi2: qi2_callable + # type=float: computed QI2 value templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -57,7 +62,7 @@ tests: air_msk: # type=file|default=: air (without artifacts) mask imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/compute_qi2_callables.py b/nipype-auto-conv/specs/interfaces/compute_qi2_callables.py new file mode 100644 index 0000000..86a6ecc --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/compute_qi2_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of ComputeQI2.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +def qi2_callable(output_dir, 
inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["qi2"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/conform_image.yaml b/nipype-auto-conv/specs/interfaces/conform_image.yaml new file mode 100644 index 0000000..9647d5b --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/conform_image.yaml @@ -0,0 +1,136 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.common.conform_image.ConformImage' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +# Conforms an input image. +# +# List of nifti datatypes: +# +# .. note: Original Analyze 7.5 types +# +# DT_NONE 0 +# DT_UNKNOWN 0 / what it says, dude / +# DT_BINARY 1 / binary (1 bit/voxel) / +# DT_UNSIGNED_CHAR 2 / unsigned char (8 bits/voxel) / +# DT_SIGNED_SHORT 4 / signed short (16 bits/voxel) / +# DT_SIGNED_INT 8 / signed int (32 bits/voxel) / +# DT_FLOAT 16 / float (32 bits/voxel) / +# DT_COMPLEX 32 / complex (64 bits/voxel) / +# DT_DOUBLE 64 / double (64 bits/voxel) / +# DT_RGB 128 / RGB triple (24 bits/voxel) / +# DT_ALL 255 / not very useful (?) / +# +# .. note: Added names for the same data types +# +# DT_UINT8 2 +# DT_INT16 4 +# DT_INT32 8 +# DT_FLOAT32 16 +# DT_COMPLEX64 32 +# DT_FLOAT64 64 +# DT_RGB24 128 +# +# .. note: New codes for NIfTI +# +# DT_INT8 256 / signed char (8 bits) / +# DT_UINT16 512 / unsigned short (16 bits) / +# DT_UINT32 768 / unsigned int (32 bits) / +# DT_INT64 1024 / long long (64 bits) / +# DT_UINT64 1280 / unsigned long long (64 bits) / +# DT_FLOAT128 1536 / long double (128 bits) / +# DT_COMPLEX128 1792 / double pair (128 bits) / +# DT_COMPLEX256 2048 / long double pair (256 bits) / +# NIFTI_TYPE_UINT8 2 /! unsigned char. 
/ +# NIFTI_TYPE_INT16 4 /! signed short. / +# NIFTI_TYPE_INT32 8 /! signed int. / +# NIFTI_TYPE_FLOAT32 16 /! 32 bit float. / +# NIFTI_TYPE_COMPLEX64 32 /! 64 bit complex = 2 32 bit floats. / +# NIFTI_TYPE_FLOAT64 64 /! 64 bit float = double. / +# NIFTI_TYPE_RGB24 128 /! 3 8 bit bytes. / +# NIFTI_TYPE_INT8 256 /! signed char. / +# NIFTI_TYPE_UINT16 512 /! unsigned short. / +# NIFTI_TYPE_UINT32 768 /! unsigned int. / +# NIFTI_TYPE_INT64 1024 /! signed long long. / +# NIFTI_TYPE_UINT64 1280 /! unsigned long long. / +# NIFTI_TYPE_FLOAT128 1536 /! 128 bit float = long double. / +# NIFTI_TYPE_COMPLEX128 1792 /! 128 bit complex = 2 64 bit floats. / +# NIFTI_TYPE_COMPLEX256 2048 /! 256 bit complex = 2 128 bit floats / +# +# +task_name: ConformImage +nipype_name: ConformImage +nipype_module: mriqc.interfaces.common.conform_image +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_file: generic/file + # type=file|default=: input image + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + out_file: generic/file + # type=file: output conformed file + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: input image + check_ras: + # type=bool|default=True: check that orientation is RAS + check_dtype: + # type=bool|default=True: check data type + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - [config\.loggers\.interface, "logger"] + - [ + messages\.SUSPICIOUS_DATA_TYPE, + '"Input image {in_file} has a suspicious data type: ''{dtype}''"', + ] diff --git a/nipype-auto-conv/specs/interfaces/conform_image_callables.py b/nipype-auto-conv/specs/interfaces/conform_image_callables.py new file mode 100644 index 0000000..a26857a --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/conform_image_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of ConformImage.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/correct_signal_drift.yaml b/nipype-auto-conv/specs/interfaces/correct_signal_drift.yaml similarity index 87% rename from nipype-auto-conv/specs/correct_signal_drift.yaml rename to nipype-auto-conv/specs/interfaces/correct_signal_drift.yaml index f49fe2e..5726c69 100644 --- a/nipype-auto-conv/specs/correct_signal_drift.yaml +++ b/nipype-auto-conv/specs/interfaces/correct_signal_drift.yaml @@ -20,8 +20,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: generic/file - # type=file|default=: a 4D file with all low-b volumes bias_file: generic/file # type=file|default=: a B1 bias field brainmask_file: generic/file @@ -30,6 +28,11 @@ inputs: # type=file|default=: bvalues file full_epi: generic/file # type=file|default=: a whole DWI dataset to be corrected for drift + in_file: generic/file + # type=file|default=: a 4D file with (exclusively) realigned low-b volumes + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -44,7 +47,7 @@ outputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. out_file: generic/file - # type=file: input file after drift correction + # type=file: a 4D file with (exclusively) realigned, drift-corrected low-b volumes out_full_file: generic/file # type=file: full DWI input after drift correction callables: @@ -59,7 +62,7 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) in_file: - # type=file|default=: a 4D file with all low-b volumes + # type=file|default=: a 4D file with (exclusively) realigned low-b volumes bias_file: # type=file|default=: a B1 bias field brainmask_file: @@ -71,7 +74,7 @@ tests: full_epi: # type=file|default=: a whole DWI dataset to be corrected for drift imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, 
noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/correct_signal_drift_callables.py b/nipype-auto-conv/specs/interfaces/correct_signal_drift_callables.py new file mode 100644 index 0000000..f8f9d08 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/correct_signal_drift_callables.py @@ -0,0 +1,34 @@ +"""Module to put any functions that are referred to in the "callables" section of CorrectSignalDrift.yaml""" + + +def b0_drift_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["b0_drift"] + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +def out_full_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_full_file"] + + +def signal_drift_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["signal_drift"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/derivatives_data_sink.yaml b/nipype-auto-conv/specs/interfaces/derivatives_data_sink.yaml new file mode 100644 index 0000000..3272310 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/derivatives_data_sink.yaml @@ -0,0 +1,91 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.DerivativesDataSink' from Nipype to Pydra. 
+# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +task_name: DerivativesDataSink +nipype_name: DerivativesDataSink +nipype_module: mriqc.interfaces +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + base_directory: generic/directory + # type=directory|default='': Path to the base directory for storing data. + in_file: generic/file+list-of + # type=inputmultiobject|default=[]: the object to be saved + source_file: generic/file+list-of + # type=inputmultiobject|default=[]: the source file(s) to extract entities from + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ out_file: generic/file+list-of + # type=outputmultiobject: + out_meta: generic/file+list-of + # type=outputmultiobject: + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + base_directory: + # type=directory|default='': Path to the base directory for storing data. + check_hdr: + # type=bool|default=True: fix headers of NIfTI outputs + compress: + # type=inputmultiobject|default=[]: whether ``in_file`` should be compressed (True), uncompressed (False) or left unmodified (None, default). + data_dtype: + # type=str|default='': NumPy datatype to coerce NIfTI data to, or `source` tomatch the input file dtype + dismiss_entities: + # type=inputmultiobject|default=[]: a list entities that will not be propagated from the source file + in_file: + # type=inputmultiobject|default=[]: the object to be saved + meta_dict: + # type=dict|default={}: an input dictionary containing metadata + source_file: + # type=inputmultiobject|default=[]: the source file(s) to extract entities from + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # 
after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] diff --git a/nipype-auto-conv/specs/interfaces/derivatives_data_sink_callables.py b/nipype-auto-conv/specs/interfaces/derivatives_data_sink_callables.py new file mode 100644 index 0000000..7447954 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/derivatives_data_sink_callables.py @@ -0,0 +1,34 @@ +"""Module to put any functions that are referred to in the "callables" section of DerivativesDataSink.yaml""" + + +def compression_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["compression"] + + +def fixed_hdr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fixed_hdr"] + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +def out_meta_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_meta"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/dipy_dti.yaml b/nipype-auto-conv/specs/interfaces/diffusion_model.yaml similarity index 76% rename from nipype-auto-conv/specs/dipy_dti.yaml rename to nipype-auto-conv/specs/interfaces/diffusion_model.yaml index ddc1c36..a507a4f 100644 --- 
a/nipype-auto-conv/specs/dipy_dti.yaml +++ b/nipype-auto-conv/specs/interfaces/diffusion_model.yaml @@ -1,13 +1,19 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.diffusion.DipyDTI' from Nipype to Pydra. +# 'mriqc.interfaces.diffusion.DiffusionModel' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- -# Split a DWI dataset into . -task_name: DipyDTI -nipype_name: DipyDTI +# +# Fit a :obj:`~dipy.reconst.dki.DiffusionKurtosisModel` on the dataset. +# +# If ``n_shells`` is set to 1, then a :obj:`~dipy.reconst.dti.TensorModel` +# is used. +# +# +task_name: DiffusionModel +nipype_name: DiffusionModel nipype_module: mriqc.interfaces.diffusion inputs: omit: @@ -20,12 +26,15 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: generic/file - # type=file|default=: dwi file + brain_mask: generic/file + # type=file|default=: brain mask file bvec_file: generic/file # type=file|default=: b-vectors - brainmask: generic/file - # type=file|default=: brain mask file + in_file: generic/file + # type=file|default=: dwi file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -39,8 +48,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
+ out_cfa: generic/file + # type=file: output color FA file out_fa: generic/file # type=file: output FA file + out_fa_degenerate: generic/file + # type=file: binary mask of values outside [0, 1] in the "raw" FA map + out_fa_nans: generic/file + # type=file: binary mask of NaN values in the "raw" FA map out_md: generic/file # type=file: output MD file callables: @@ -60,14 +75,14 @@ tests: # type=list|default=[]: bval table bvec_file: # type=file|default=: b-vectors - brainmask: + brain_mask: # type=file|default=: brain mask file - free_water_model: - # type=bool|default=False: use free water model - b_threshold: - # type=float|default=1100: use only inner shells of the data + decimals: + # type=int|default=3: round output maps for reliability + n_shells: + # type=int|default=0: number of shells imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/diffusion_model_callables.py b/nipype-auto-conv/specs/interfaces/diffusion_model_callables.py new file mode 100644 index 0000000..a197637 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/diffusion_model_callables.py @@ -0,0 +1,41 @@ +"""Module to put any functions that are referred to in the "callables" section of DiffusionModel.yaml""" + + +def out_cfa_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_cfa"] + + +def out_fa_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_fa"] + + +def 
out_fa_degenerate_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_fa_degenerate"] + + +def out_fa_nans_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_fa_nans"] + + +def out_md_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_md"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/diffusion_qc.yaml b/nipype-auto-conv/specs/interfaces/diffusion_qc.yaml new file mode 100644 index 0000000..dd3c1a2 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/diffusion_qc.yaml @@ -0,0 +1,165 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.diffusion.DiffusionQC' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Computes :abbr:`QC (Quality Control)` measures on the input DWI EPI scan. +task_name: DiffusionQC +nipype_name: DiffusionQC +nipype_module: mriqc.interfaces.diffusion +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ brain_mask: generic/file + # type=file|default=: input probabilistic brain mask + cc_mask: generic/file + # type=file|default=: input binary mask of the corpus callosum + in_b0: generic/file + # type=file|default=: input b=0 average + in_bval_file: generic/file + # type=file|default=: original b-vals file + in_cfa: generic/file + # type=file|default=: output color FA file + in_fa: generic/file + # type=file|default=: input FA map + in_fa_degenerate: generic/file + # type=file|default=: binary mask of values outside [0, 1] in the "raw" FA map + in_fa_nans: generic/file + # type=file|default=: binary mask of NaN values in the "raw" FA map + in_fd: generic/file + # type=file|default=: motion parameters for FD computation + in_file: generic/file + # type=file|default=: original EPI 4D file + in_md: generic/file + # type=file|default=: input MD map + in_shells: generic/file+list-of + # type=inputmultiobject|default=[]: DWI data after HMC and split by shells (indexed by in_bval) + spikes_mask: generic/file + # type=file|default=: input binary mask of spiking voxels + wm_mask: generic/file + # type=file|default=: input probabilistic white-matter mask + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + bdiffs: bdiffs_callable + # type=dict: + efc: efc_callable + # type=dict: + fa_degenerate: fa_degenerate_callable + # type=float: + fa_nans: fa_nans_callable + # type=float: + fber: fber_callable + # type=dict: + fd: fd_callable + # type=dict: + ndc: ndc_callable + # type=float: + out_qc: out_qc_callable + # type=dict: output flattened dictionary with all measures + sigma: sigma_callable + # type=dict: + snr_cc: snr_cc_callable + # type=dict: + spikes: spikes_callable + # type=dict: + summary: summary_callable + # type=dict: + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: original EPI 4D file + in_b0: + # type=file|default=: input b=0 average + in_shells: + # type=inputmultiobject|default=[]: DWI data after HMC and split by shells (indexed by in_bval) + in_shells_bval: + # type=list|default=[]: list of unique b-values (one per shell), ordered by growing intensity + in_bval_file: + # type=file|default=: original b-vals file + in_bvec: + # type=list|default=[]: a list of shell-wise splits of b-vectors lists -- first list are b=0 + in_bvec_rotated: + # type=list|default=[]: b-vectors after rotating by the head-motion correction transform + in_bvec_diff: + # 
type=list|default=[]: list of angle deviations from the original b-vectors table + in_fa: + # type=file|default=: input FA map + in_fa_nans: + # type=file|default=: binary mask of NaN values in the "raw" FA map + in_fa_degenerate: + # type=file|default=: binary mask of values outside [0, 1] in the "raw" FA map + in_cfa: + # type=file|default=: output color FA file + in_md: + # type=file|default=: input MD map + brain_mask: + # type=file|default=: input probabilistic brain mask + wm_mask: + # type=file|default=: input probabilistic white-matter mask + cc_mask: + # type=file|default=: input binary mask of the corpus callosum + spikes_mask: + # type=file|default=: input binary mask of spiking voxels + noise_floor: + # type=float|default=0.0: noise-floor map estimated by means of PCA + direction: + # type=enum|default='all'|allowed['-x','-y','all','x','y']: direction for GSR computation + in_fd: + # type=file|default=: motion parameters for FD computation + fd_thres: + # type=float|default=0.2: FD threshold for orientation exclusion based on head motion + in_fwhm: + # type=list|default=[]: smoothness estimated with AFNI + qspace_neighbors: + # type=list|default=[]: q-space nearest neighbor pairs + piesno_sigma: + # type=float|default=-1.0: noise sigma calculated with PIESNO + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] diff --git a/nipype-auto-conv/specs/interfaces/diffusion_qc_callables.py b/nipype-auto-conv/specs/interfaces/diffusion_qc_callables.py new file mode 100644 index 0000000..4b638ab --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/diffusion_qc_callables.py @@ -0,0 +1,90 @@ +"""Module to put any functions that are referred to in the "callables" section of DiffusionQC.yaml""" + + +def bdiffs_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["bdiffs"] + + +def efc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["efc"] + + +def fa_degenerate_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fa_degenerate"] + + +def fa_nans_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fa_nans"] + + +def fber_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fber"] + + +def fd_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fd"] + + +def ndc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["ndc"] + + +def 
out_qc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_qc"] + + +def sigma_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["sigma"] + + +def snr_cc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["snr_cc"] + + +def spikes_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["spikes"] + + +def summary_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["summary"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/ensure_size.yaml b/nipype-auto-conv/specs/interfaces/ensure_size.yaml new file mode 100644 index 0000000..bfaa99b --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/ensure_size.yaml @@ -0,0 +1,92 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.common.ensure_size.EnsureSize' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +# Checks the size of the input image and resamples it to have `pixel_size`. 
+# +task_name: EnsureSize +nipype_name: EnsureSize +nipype_module: mriqc.interfaces.common.ensure_size +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_file: generic/file + # type=file|default=: input image + in_mask: generic/file + # type=file|default=: input mask + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ out_file: generic/file + # type=file: output image + out_mask: generic/file + # type=file: output mask + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: input image + in_mask: + # type=file|default=: input mask + pixel_size: + # type=float|default=2.0: desired pixel size (mm) + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - ["config.loggers.interface", "logger"] + - [ + "messages.VOXEL_SIZE_SMALL", + "'One or more voxel dimensions (%f, %f, %f) are smaller than the requested voxel size (%f) - diff=(%f, %f, %f)'", + ] + - ["messages.VOXEL_SIZE_OK", "'Voxel size is large enough.'"] + - [ + "load_data = Loader\\(\"mriqc\"\\)", + 'load_data = Loader("pydra.tasks.mriqc")', + ] diff --git a/nipype-auto-conv/specs/interfaces/ensure_size_callables.py b/nipype-auto-conv/specs/interfaces/ensure_size_callables.py new file mode 100644 index 0000000..96d1a94 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/ensure_size_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of EnsureSize.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +def out_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_mask"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/extract_b0.yaml b/nipype-auto-conv/specs/interfaces/extract_orientations.yaml similarity index 82% rename from nipype-auto-conv/specs/extract_b0.yaml rename to nipype-auto-conv/specs/interfaces/extract_orientations.yaml index 86611b2..8da5f32 100644 --- a/nipype-auto-conv/specs/extract_b0.yaml +++ b/nipype-auto-conv/specs/interfaces/extract_orientations.yaml @@ -1,13 +1,13 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.diffusion.ExtractB0' from Nipype to Pydra. 
+# 'mriqc.interfaces.diffusion.ExtractOrientations' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- # Extract all b=0 volumes from a dwi series. -task_name: ExtractB0 -nipype_name: ExtractB0 +task_name: ExtractOrientations +nipype_name: ExtractOrientations nipype_module: mriqc.interfaces.diffusion inputs: omit: @@ -20,8 +20,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. + in_bvec_file: generic/file + # type=file|default=: b-vectors file in_file: generic/file # type=file|default=: dwi file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -50,10 +55,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: dwi file - b0_ixs: - # type=list|default=[]: Index of b0s + indices: + # type=list|default=[]: indices to be extracted + in_bvec_file: + # type=file|default=: b-vectors file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/extract_orientations_callables.py b/nipype-auto-conv/specs/interfaces/extract_orientations_callables.py new file mode 100644 index 0000000..8a003f8 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/extract_orientations_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of ExtractOrientations.yaml""" + + +def out_bvec_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bvec"] + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/filter_shells.yaml b/nipype-auto-conv/specs/interfaces/filter_shells.yaml similarity index 92% rename from nipype-auto-conv/specs/filter_shells.yaml rename to nipype-auto-conv/specs/interfaces/filter_shells.yaml index 07d39d8..c6eba3d 100644 --- 
a/nipype-auto-conv/specs/filter_shells.yaml +++ b/nipype-auto-conv/specs/interfaces/filter_shells.yaml @@ -20,10 +20,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: generic/file - # type=file|default=: dwi file bvec_file: generic/file # type=file|default=: b-vectors + in_file: generic/file + # type=file|default=: dwi file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -37,12 +40,12 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: generic/file - # type=file: filtered DWI file - out_bvec_file: generic/file - # type=file: filtered bvecs file out_bval_file: generic/file # type=file: filtered bvals file + out_bvec_file: generic/file + # type=file: filtered bvecs file + out_file: generic/file + # type=file: filtered DWI file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -63,7 +66,7 @@ tests: b_threshold: # type=float|default=1100: b-values threshold imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/filter_shells_callables.py b/nipype-auto-conv/specs/interfaces/filter_shells_callables.py new file mode 100644 index 0000000..69b9897 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/filter_shells_callables.py @@ -0,0 +1,34 @@ +"""Module to put any functions that are referred to in the "callables" section of FilterShells.yaml""" + + +def out_bval_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bval_file"] + + +def out_bvals_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bvals"] + + +def out_bvec_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bvec_file"] + + +def out_file_callable(output_dir, inputs, stdout, 
stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/functional_qc.yaml b/nipype-auto-conv/specs/interfaces/functional_qc.yaml similarity index 82% rename from nipype-auto-conv/specs/functional_qc.yaml rename to nipype-auto-conv/specs/interfaces/functional_qc.yaml index 45a9d44..a7e18c2 100644 --- a/nipype-auto-conv/specs/functional_qc.yaml +++ b/nipype-auto-conv/specs/interfaces/functional_qc.yaml @@ -24,18 +24,21 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. + in_dvars: generic/file + # type=file|default=: input file containing DVARS in_epi: generic/file # type=file|default=: input EPI file + in_fd: generic/file + # type=file|default=: motion parameters for FD computation in_hmc: generic/file # type=file|default=: input motion corrected file - in_tsnr: generic/file - # type=file|default=: input tSNR volume in_mask: generic/file # type=file|default=: input mask - in_fd: generic/file - # type=file|default=: motion parameters for FD computation - in_dvars: generic/file - # type=file|default=: input file containing DVARS + in_tsnr: generic/file + # type=file|default=: input tSNR volume + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -52,6 +55,30 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + dvars: dvars_callable + # type=dict: + efc: efc_callable + # type=float: + fber: fber_callable + # type=float: + fd: fd_callable + # type=dict: + fwhm: fwhm_callable + # type=dict: full width half-maximum measure + gsr: gsr_callable + # type=dict: + out_qc: out_qc_callable + # type=dict: output flattened dictionary with all measures + size: size_callable + # type=dict: + snr: snr_callable + # type=float: + spacing: spacing_callable + # type=dict: + summary: summary_callable + # type=dict: + tsnr: tsnr_callable + # type=float: templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -79,7 +106,7 @@ tests: in_fwhm: # type=list|default=[]: smoothness estimated with AFNI imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/functional_qc_callables.py b/nipype-auto-conv/specs/interfaces/functional_qc_callables.py new file mode 100644 index 0000000..a8cf7b3 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/functional_qc_callables.py @@ -0,0 +1,90 @@ +"""Module to put any functions that are referred to in the "callables" section of FunctionalQC.yaml""" + + +def dvars_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["dvars"] + + +def efc_callable(output_dir, inputs, stdout, stderr): + outputs = 
_list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["efc"] + + +def fber_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fber"] + + +def fd_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fd"] + + +def fwhm_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fwhm"] + + +def gsr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["gsr"] + + +def out_qc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_qc"] + + +def size_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["size"] + + +def snr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["snr"] + + +def spacing_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["spacing"] + + +def summary_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["summary"] + + +def tsnr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["tsnr"] + + +# Original source at 
L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/gather_timeseries.yaml b/nipype-auto-conv/specs/interfaces/gather_timeseries.yaml similarity index 91% rename from nipype-auto-conv/specs/gather_timeseries.yaml rename to nipype-auto-conv/specs/interfaces/gather_timeseries.yaml index e3a324b..caf01a2 100644 --- a/nipype-auto-conv/specs/gather_timeseries.yaml +++ b/nipype-auto-conv/specs/interfaces/gather_timeseries.yaml @@ -33,6 +33,9 @@ inputs: # type=file|default=: input file containing timeseries of AFNI's outlier count quality: generic/file # type=file|default=: input file containing AFNI's Quality Index + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -51,6 +54,8 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + timeseries_metadata: timeseries_metadata_callable + # type=dict: Metadata dictionary describing columns templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -72,7 +77,7 @@ tests: quality: # type=file|default=: input file containing AFNI's Quality Index imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git 
a/nipype-auto-conv/specs/interfaces/gather_timeseries_callables.py b/nipype-auto-conv/specs/interfaces/gather_timeseries_callables.py new file mode 100644 index 0000000..ee78952 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/gather_timeseries_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of GatherTimeseries.yaml""" + + +def timeseries_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["timeseries_file"] + + +def timeseries_metadata_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["timeseries_metadata"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/gcor.yaml b/nipype-auto-conv/specs/interfaces/gcor.yaml similarity index 90% rename from nipype-auto-conv/specs/gcor.yaml rename to nipype-auto-conv/specs/interfaces/gcor.yaml index 58096a4..812d9ef 100644 --- a/nipype-auto-conv/specs/gcor.yaml +++ b/nipype-auto-conv/specs/interfaces/gcor.yaml @@ -40,6 +40,9 @@ inputs: # type=file|default=: input dataset to compute the GCOR over mask: generic/file # type=file|default=: mask dataset, for restricting the computation + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -56,6 +59,8 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + out: out_callable + # type=float: global correlation value templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -77,7 +82,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,7 +104,7 @@ tests: nfirst: '4' # type=int|default=0: specify number of initial TRs to ignore imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: mriqc.interfaces.transitional name: GCOR @@ -123,12 +128,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
- in_file: + in_file: '"func.nii"' # type=file|default=: input dataset to compute the GCOR over nfirst: '4' # type=int|default=0: specify number of initial TRs to ignore imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/interfaces/gcor_callables.py b/nipype-auto-conv/specs/interfaces/gcor_callables.py new file mode 100644 index 0000000..58a9755 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/gcor_callables.py @@ -0,0 +1,18 @@ +"""Module to put any functions that are referred to in the "callables" section of GCOR.yaml""" + + +def out_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L98 of /interfaces/transitional.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return {"out": _gcor} diff --git a/nipype-auto-conv/specs/harmonize.yaml b/nipype-auto-conv/specs/interfaces/harmonize.yaml similarity index 92% rename from nipype-auto-conv/specs/harmonize.yaml rename to nipype-auto-conv/specs/interfaces/harmonize.yaml index d070290..9e3faf1 100644 --- a/nipype-auto-conv/specs/harmonize.yaml +++ b/nipype-auto-conv/specs/interfaces/harmonize.yaml @@ -26,6 +26,9 @@ inputs: # type=file|default=: input data (after bias correction) wm_mask: generic/file # type=file|default=: white-matter mask + callable_defaults: + # dict[str, str] - names of 
methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -61,7 +64,7 @@ tests: thresh: # type=float|default=0.9: WM probability threshold imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/harmonize_callables.py b/nipype-auto-conv/specs/interfaces/harmonize_callables.py new file mode 100644 index 0000000..27a9ad6 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/harmonize_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of Harmonize.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/iqm_file_sink.yaml b/nipype-auto-conv/specs/interfaces/iqm_file_sink.yaml new file mode 100644 index 0000000..6618818 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/iqm_file_sink.yaml @@ -0,0 +1,101 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.bids.IQMFileSink' from Nipype to Pydra. 
+# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +task_name: IQMFileSink +nipype_name: IQMFileSink +nipype_module: mriqc.interfaces.bids +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + out_dir: Path + # type=file|default=: the output directory + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ out_file: generic/file + # type=file: the output JSON file containing the IQMs + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=str|default='': path of input file + subject_id: + # type=str|default='': the subject id + modality: + # type=str|default='': the qc type + session_id: + # type=traitcompound|default=None: + task_id: + # type=traitcompound|default=None: + acq_id: + # type=traitcompound|default=None: + rec_id: + # type=traitcompound|default=None: + run_id: + # type=traitcompound|default=None: + dataset: + # type=str|default='': dataset identifier + dismiss_entities: + # type=list|default=['part']: + metadata: + # type=dict|default={}: + provenance: + # type=dict|default={}: + root: + # type=dict|default={}: output root dictionary + out_dir: + # type=file|default=: the output directory + _outputs: + # type=dict|default={}: + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - [config\.loggers\.\w+\., logger.] diff --git a/nipype-auto-conv/specs/interfaces/iqm_file_sink_callables.py b/nipype-auto-conv/specs/interfaces/iqm_file_sink_callables.py new file mode 100644 index 0000000..37a8f1d --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/iqm_file_sink_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of IQMFileSink.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/number_of_shells.yaml b/nipype-auto-conv/specs/interfaces/number_of_shells.yaml similarity index 87% rename from nipype-auto-conv/specs/number_of_shells.yaml rename to nipype-auto-conv/specs/interfaces/number_of_shells.yaml index 6605b62..ce8f215 100644 --- a/nipype-auto-conv/specs/number_of_shells.yaml +++ b/nipype-auto-conv/specs/interfaces/number_of_shells.yaml @@ -37,6 +37,9 @@ inputs: # passed to the field in the automatically generated unittests. in_bvals: generic/file # type=file|default=: bvals file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -53,6 +56,10 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + b_dict: b_dict_callable + # type=dict: a map of b-values (including b=0) and masks + n_shells: n_shells_callable + # type=int: number of shells templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -65,8 +72,10 @@ tests: # type=file|default=: bvals file b0_threshold: # type=float|default=50: a threshold for the low-b values + dsi_threshold: + # type=int|default=11: number of shells to call a dataset DSI imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/number_of_shells_callables.py b/nipype-auto-conv/specs/interfaces/number_of_shells_callables.py new file mode 100644 index 0000000..4f14b4a --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/number_of_shells_callables.py @@ -0,0 +1,55 @@ +"""Module to put any functions that are referred to in the "callables" section of NumberOfShells.yaml""" + + +def b_dict_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["b_dict"] + + +def b_indices_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["b_indices"] + + +def b_masks_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, 
inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["b_masks"] + + +def b_values_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["b_values"] + + +def models_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["models"] + + +def n_shells_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["n_shells"] + + +def out_data_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_data"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/ensure_size.yaml b/nipype-auto-conv/specs/interfaces/piesno.yaml similarity index 77% rename from nipype-auto-conv/specs/ensure_size.yaml rename to nipype-auto-conv/specs/interfaces/piesno.yaml index c1a7ee5..849c0ea 100644 --- a/nipype-auto-conv/specs/ensure_size.yaml +++ b/nipype-auto-conv/specs/interfaces/piesno.yaml @@ -1,16 +1,14 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.common.ensure_size.EnsureSize' from Nipype to Pydra. +# 'mriqc.interfaces.diffusion.PIESNO' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- -# -# Checks the size of the input image and resamples it to have `pixel_size`. -# -task_name: EnsureSize -nipype_name: EnsureSize -nipype_module: mriqc.interfaces.common +# Computes :abbr:`QC (Quality Control)` measures on the input DWI EPI scan. 
+task_name: PIESNO +nipype_name: PIESNO +nipype_module: mriqc.interfaces.diffusion inputs: omit: # list[str] - fields to omit from the Pydra interface @@ -23,9 +21,10 @@ inputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. in_file: generic/file - # type=file|default=: input image - in_mask: generic/file - # type=file|default=: input mask + # type=file|default=: a DWI 4D file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -39,13 +38,13 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: generic/file - # type=file: output image out_mask: generic/file - # type=file: output mask + # type=file: a 4D binary mask of spiking voxels callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + sigma: sigma_callable + # type=float: noise sigma calculated with PIESNO templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -55,13 +54,11 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) in_file: - # type=file|default=: input image - in_mask: - # type=file|default=: input mask - pixel_size: - # type=float|default=2.0: desired pixel size (mm) + # type=file|default=: a DWI 4D file + n_channels: + # type=int|default=4: number of channels imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/piesno_callables.py b/nipype-auto-conv/specs/interfaces/piesno_callables.py new file mode 100644 index 0000000..9a70b98 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/piesno_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of PIESNO.yaml""" + + +def out_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_mask"] + + +def sigma_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, 
stdout=stdout, stderr=stderr + ) + return outputs["sigma"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/read_dwi_metadata.yaml b/nipype-auto-conv/specs/interfaces/read_dwi_metadata.yaml similarity index 84% rename from nipype-auto-conv/specs/read_dwi_metadata.yaml rename to nipype-auto-conv/specs/interfaces/read_dwi_metadata.yaml index 8fc951d..44d8f17 100644 --- a/nipype-auto-conv/specs/read_dwi_metadata.yaml +++ b/nipype-auto-conv/specs/interfaces/read_dwi_metadata.yaml @@ -26,6 +26,9 @@ inputs: # type=file|default=: the input nifti file index_db: generic/directory # type=directory|default=: a PyBIDS layout cache directory + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -39,13 +42,29 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_bvec_file: generic/file - # type=file: corresponding bvec file out_bval_file: generic/file # type=file: corresponding bval file + out_bvec_file: generic/file + # type=file: corresponding bvec file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + acquisition: acquisition_callable + # type=str: + out_dict: out_dict_callable + # type=dict: + reconstruction: reconstruction_callable + # type=str: + run: run_callable + # type=int: + session: session_callable + # type=str: + subject: subject_callable + # type=str: + suffix: suffix_callable + # type=str: + task: task_callable + # type=str: templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -63,7 +82,7 @@ tests: index_db: # type=directory|default=: a PyBIDS layout cache directory imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/read_dwi_metadata_callables.py b/nipype-auto-conv/specs/interfaces/read_dwi_metadata_callables.py new file mode 100644 index 0000000..2d22ad3 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/read_dwi_metadata_callables.py @@ -0,0 +1,90 @@ +"""Module to put any functions that are referred to in the "callables" section of ReadDWIMetadata.yaml""" + + +def acquisition_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["acquisition"] + + +def out_bmatrix_callable(output_dir, inputs, stdout, stderr): + outputs = 
_list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bmatrix"] + + +def out_bval_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bval_file"] + + +def out_bvec_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bvec_file"] + + +def out_dict_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_dict"] + + +def qspace_neighbors_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["qspace_neighbors"] + + +def reconstruction_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["reconstruction"] + + +def run_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["run"] + + +def session_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["session"] + + +def subject_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["subject"] + + +def suffix_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["suffix"] + + +def task_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, 
inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["task"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/rotate_vectors.yaml b/nipype-auto-conv/specs/interfaces/rotate_vectors.yaml new file mode 100644 index 0000000..b3f0043 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/rotate_vectors.yaml @@ -0,0 +1,77 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.diffusion.RotateVectors' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Extract all b=0 volumes from a dwi series. +task_name: RotateVectors +nipype_name: RotateVectors +nipype_module: mriqc.interfaces.diffusion +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_file: generic/file + # type=file|default=: TSV file containing original b-vectors and b-values + reference: generic/file + # type=file|default=: dwi-related file providing the reference affine + transforms: generic/file + # type=file|default=: list of head-motion transforms + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: TSV file containing original b-vectors and b-values + reference: + # type=file|default=: dwi-related file providing the reference affine + transforms: + # type=file|default=: list of head-motion transforms + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] diff --git a/nipype-auto-conv/specs/interfaces/rotate_vectors_callables.py b/nipype-auto-conv/specs/interfaces/rotate_vectors_callables.py new file mode 100644 index 0000000..58f2798 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/rotate_vectors_callables.py @@ -0,0 +1,20 @@ +"""Module to put any functions that are referred to in the "callables" section of RotateVectors.yaml""" + + +def out_bvec_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_bvec"] + + +def out_diff_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_diff"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/rotation_mask.yaml b/nipype-auto-conv/specs/interfaces/rotation_mask.yaml similarity index 91% rename from nipype-auto-conv/specs/rotation_mask.yaml rename to nipype-auto-conv/specs/interfaces/rotation_mask.yaml index d2d9411..d9cc9e7 100644 --- a/nipype-auto-conv/specs/rotation_mask.yaml +++ b/nipype-auto-conv/specs/interfaces/rotation_mask.yaml @@ -24,6 +24,9 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: input data + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -53,7 +56,7 @@ tests: in_file: # type=file|default=: input data imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/rotation_mask_callables.py b/nipype-auto-conv/specs/interfaces/rotation_mask_callables.py new file mode 100644 index 0000000..a4b7d80 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/rotation_mask_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of RotationMask.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/select_echo.yaml b/nipype-auto-conv/specs/interfaces/select_echo.yaml similarity index 88% rename from nipype-auto-conv/specs/select_echo.yaml rename to nipype-auto-conv/specs/interfaces/select_echo.yaml index 9c8c1cc..c743985 100644 --- a/nipype-auto-conv/specs/select_echo.yaml +++ b/nipype-auto-conv/specs/interfaces/select_echo.yaml @@ -26,6 +26,9 @@ inputs: # passed to the field in the 
automatically generated unittests. in_files: generic/file+list-of # type=inputmultiobject|default=[]: input EPI file(s) + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -44,6 +47,10 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + echo_index: echo_index_callable + # type=int: index of the selected echo + is_multiecho: is_multiecho_callable + # type=bool: whether it is a multiecho dataset templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -59,7 +66,7 @@ tests: te_reference: # type=float|default=0.03: reference SE-EPI echo time imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/select_echo_callables.py b/nipype-auto-conv/specs/interfaces/select_echo_callables.py new file mode 100644 index 0000000..7f3e49d --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/select_echo_callables.py @@ -0,0 +1,27 @@ +"""Module to put any functions that are referred to in the "callables" section of SelectEcho.yaml""" + + +def echo_index_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["echo_index"] + + +def is_multiecho_callable(output_dir, 
inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["is_multiecho"] + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/spikes.yaml b/nipype-auto-conv/specs/interfaces/spikes.yaml similarity index 91% rename from nipype-auto-conv/specs/spikes.yaml rename to nipype-auto-conv/specs/interfaces/spikes.yaml index 1db3062..79e9d41 100644 --- a/nipype-auto-conv/specs/spikes.yaml +++ b/nipype-auto-conv/specs/interfaces/spikes.yaml @@ -28,12 +28,15 @@ inputs: # type=file|default=: input fMRI dataset in_mask: generic/file # type=file|default=: brain mask - out_tsz: generic/file - # type=file: slice-wise z-scored timeseries (Z x N), inside brainmask - # type=file|default='spikes_tsz.txt': output file name - out_spikes: generic/file + out_spikes: Path # type=file: indices of spikes # type=file|default='spikes_idx.txt': output file name + out_tsz: Path + # type=file: slice-wise z-scored timeseries (Z x N), inside brainmask + # type=file|default='spikes_tsz.txt': output file name + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -47,15 +50,17 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_tsz: generic/file - # type=file: slice-wise z-scored timeseries (Z x N), inside brainmask - # type=file|default='spikes_tsz.txt': output file name out_spikes: generic/file # type=file: indices of spikes # type=file|default='spikes_idx.txt': output file name + out_tsz: generic/file + # type=file: slice-wise z-scored timeseries (Z x N), inside brainmask + # type=file|default='spikes_tsz.txt': output file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + num_spikes: num_spikes_callable + # type=int: number of spikes found (total) templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -85,7 +90,7 @@ tests: # type=file: indices of spikes # type=file|default='spikes_idx.txt': output file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/spikes_callables.py b/nipype-auto-conv/specs/interfaces/spikes_callables.py new file mode 100644 index 0000000..1ccb440 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/spikes_callables.py @@ -0,0 +1,27 @@ +"""Module to put any functions that are referred to in the "callables" section of Spikes.yaml""" + + +def num_spikes_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["num_spikes"] + + +def out_spikes_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + 
) + return outputs["out_spikes"] + + +def out_tsz_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_tsz"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/add_provenance.yaml b/nipype-auto-conv/specs/interfaces/spiking_voxels_mask.yaml similarity index 72% rename from nipype-auto-conv/specs/add_provenance.yaml rename to nipype-auto-conv/specs/interfaces/spiking_voxels_mask.yaml index 5ac2dbc..a42ba6b 100644 --- a/nipype-auto-conv/specs/add_provenance.yaml +++ b/nipype-auto-conv/specs/interfaces/spiking_voxels_mask.yaml @@ -1,14 +1,14 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.reports.AddProvenance' from Nipype to Pydra. +# 'mriqc.interfaces.diffusion.SpikingVoxelsMask' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- -# Builds a provenance dictionary. -task_name: AddProvenance -nipype_name: AddProvenance -nipype_module: mriqc.interfaces.reports +# Computes :abbr:`QC (Quality Control)` measures on the input DWI EPI scan. +task_name: SpikingVoxelsMask +nipype_name: SpikingVoxelsMask +nipype_module: mriqc.interfaces.diffusion inputs: omit: # list[str] - fields to omit from the Pydra interface @@ -20,12 +20,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
+ brain_mask: generic/file + # type=file|default=: input probabilistic brain 3D mask in_file: generic/file - # type=file|default=: input file - air_msk: generic/file - # type=file|default=: air mask file - rot_msk: generic/file - # type=file|default=: rotation mask file + # type=file|default=: a DWI 4D file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -39,6 +40,8 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. + out_mask: generic/file + # type=file: a 4D binary mask of spiking voxels callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -51,15 +54,15 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) in_file: - # type=file|default=: input file - air_msk: - # type=file|default=: air mask file - rot_msk: - # type=file|default=: rotation mask file - modality: - # type=str|default='': provenance type + # type=file|default=: a DWI 4D file + brain_mask: + # type=file|default=: input probabilistic brain 3D mask + z_threshold: + # type=float|default=3.0: z-score threshold + b_masks: + # type=list|default=[]: list of ``n_shells`` b-value-wise indices lists imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and 
optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/spiking_voxels_mask_callables.py b/nipype-auto-conv/specs/interfaces/spiking_voxels_mask_callables.py new file mode 100644 index 0000000..93d0e0b --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/spiking_voxels_mask_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of SpikingVoxelsMask.yaml""" + + +def out_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_mask"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/split_shells.yaml b/nipype-auto-conv/specs/interfaces/split_shells.yaml similarity index 89% rename from nipype-auto-conv/specs/split_shells.yaml rename to nipype-auto-conv/specs/interfaces/split_shells.yaml index 37c579e..a2b84ce 100644 --- a/nipype-auto-conv/specs/split_shells.yaml +++ b/nipype-auto-conv/specs/interfaces/split_shells.yaml @@ -22,6 +22,9 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: dwi file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -35,6 +38,8 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
+ out_file: generic/file+list-of + # type=outputmultiobject: output b0 file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -51,7 +56,7 @@ tests: bvals: # type=list|default=[]: bval table imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/split_shells_callables.py b/nipype-auto-conv/specs/interfaces/split_shells_callables.py new file mode 100644 index 0000000..a614995 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/split_shells_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of SplitShells.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/interfaces/structural_qc.yaml b/nipype-auto-conv/specs/interfaces/structural_qc.yaml new file mode 100644 index 0000000..26bf8b9 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/structural_qc.yaml @@ -0,0 +1,159 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.anatomical.StructuralQC' from Nipype to Pydra. 
+# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +# Computes anatomical :abbr:`QC (Quality Control)` measures on the +# structural image given as input +# +# +task_name: StructuralQC +nipype_name: StructuralQC +nipype_module: mriqc.interfaces.anatomical +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + air_msk: generic/file + # type=file|default=: air mask + artifact_msk: generic/file + # type=file|default=: air mask + head_msk: generic/file + # type=file|default=: head mask + in_bias: generic/file + # type=file|default=: bias file + in_file: generic/file + # type=file|default=: file to be plotted + in_noinu: generic/file + # type=file|default=: image after INU correction + in_pvms: generic/file+list-of + # type=inputmultiobject|default=[]: partial volume maps from FSL FAST + in_segm: generic/file + # type=file|default=: segmentation file from FSL FAST + in_tpms: generic/file+list-of + # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST + mni_tpms: generic/file+list-of + # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST + rot_msk: generic/file + # type=file|default=: rotation mask + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + out_noisefit: generic/file + # type=file: plot of background noise and chi fitting + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + cjv: cjv_callable + # type=float: + cnr: cnr_callable + # type=float: + efc: efc_callable + # type=float: + fber: fber_callable + # type=float: + fwhm: fwhm_callable + # type=dict: full width half-maximum measure + icvs: icvs_callable + # type=dict: intracranial volume (ICV) fractions + inu: inu_callable + # type=dict: summary statistics of the bias field + out_qc: out_qc_callable + # type=dict: output flattened dictionary with all measures + qi_1: qi_1_callable + # type=float: + rpve: rpve_callable + # type=dict: partial volume fractions + size: size_callable + # type=dict: image sizes + snr: snr_callable + # type=dict: + snrd: snrd_callable + # type=dict: + spacing: spacing_callable + # type=dict: image sizes + summary: summary_callable + # type=dict: summary statistics per tissue + tpm_overlap: tpm_overlap_callable + # type=dict: + wm2max: wm2max_callable + # type=float: + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to 
provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: file to be plotted + in_noinu: + # type=file|default=: image after INU correction + in_segm: + # type=file|default=: segmentation file from FSL FAST + in_bias: + # type=file|default=: bias file + head_msk: + # type=file|default=: head mask + air_msk: + # type=file|default=: air mask + rot_msk: + # type=file|default=: rotation mask + artifact_msk: + # type=file|default=: air mask + in_pvms: + # type=inputmultiobject|default=[]: partial volume maps from FSL FAST + in_tpms: + # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST + mni_tpms: + # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST + in_fwhm: + # type=list|default=[]: smoothness estimated with AFNI + human: + # type=bool|default=True: human workflow + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - [config\.loggers\.interface\., "logger."] + - ["\n File", "\n # File"] + - ['"out_noisefit": File,', '# "out_noisefit": File,'] + - ["out_noisefit,", "# out_noisefit,"] + - [out_qc = _flatten_dict\(self._results\), "out_qc = {}"] diff --git a/nipype-auto-conv/specs/interfaces/structural_qc_callables.py b/nipype-auto-conv/specs/interfaces/structural_qc_callables.py new file mode 100644 index 0000000..56f676a --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/structural_qc_callables.py @@ -0,0 +1,132 @@ +"""Module to put any functions that are referred to in the "callables" section of StructuralQC.yaml""" + + +def cjv_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["cjv"] + + +def cnr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["cnr"] + + +def efc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["efc"] + + +def fber_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fber"] + + +def fwhm_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["fwhm"] + + +def icvs_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["icvs"] + + +def inu_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + 
return outputs["inu"] + + +def out_noisefit_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_noisefit"] + + +def out_qc_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_qc"] + + +def qi_1_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["qi_1"] + + +def rpve_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["rpve"] + + +def size_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["size"] + + +def snr_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["snr"] + + +def snrd_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["snrd"] + + +def spacing_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["spacing"] + + +def summary_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["summary"] + + +def tpm_overlap_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["tpm_overlap"] + + +def wm2max_callable(output_dir, inputs, stdout, stderr): + outputs = 
_list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["wm2max"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/conform_image.yaml b/nipype-auto-conv/specs/interfaces/synth_strip.yaml similarity index 61% rename from nipype-auto-conv/specs/conform_image.yaml rename to nipype-auto-conv/specs/interfaces/synth_strip.yaml index cd9a2c9..bdbe066 100644 --- a/nipype-auto-conv/specs/conform_image.yaml +++ b/nipype-auto-conv/specs/interfaces/synth_strip.yaml @@ -1,14 +1,14 @@ # This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.common.conform_image.ConformImage' from Nipype to Pydra. +# 'mriqc.interfaces.synthstrip.SynthStrip' from Nipype to Pydra. # # Please fill-in/edit the fields below where appropriate # # Docs # ---- # -task_name: ConformImage -nipype_name: ConformImage -nipype_module: mriqc.interfaces.common +task_name: SynthStrip +nipype_name: SynthStrip +nipype_module: mriqc.interfaces.synthstrip inputs: omit: # list[str] - fields to omit from the Pydra interface @@ -21,7 +21,18 @@ inputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
in_file: generic/file - # type=file|default=: input image + # type=file|default=: Input image to be brain extracted + model: generic/file + # type=file|default=: file containing model's weights + out_file: Path + # type=file: brain-extracted image + # type=file|default=: store brain-extracted input to file + out_mask: Path + # type=file: brain mask + # type=file|default=: store brainmask to file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -36,7 +47,11 @@ outputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. out_file: generic/file - # type=file: output conformed file + # type=file: brain-extracted image + # type=file|default=: store brain-extracted input to file + out_mask: generic/file + # type=file: brain mask + # type=file|default=: store brainmask to file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -49,13 +64,27 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) in_file: - # type=file|default=: input image - check_ras: - # type=bool|default=True: check that orientation is RAS - check_dtype: - # type=bool|default=True: check data type + # type=file|default=: Input image to be brain extracted + use_gpu: + # type=bool|default=False: Use GPU + model: + # type=file|default=: file containing model's weights + border_mm: + # type=int|default=1: Mask border threshold in mm + out_file: + # type=file: brain-extracted image + # type=file|default=: store brain-extracted input to file + out_mask: + # 
type=file: brain mask + # type=file|default=: store brainmask to file + num_threads: + # type=int|default=0: Number of threads + args: + # type=str|default='': Additional parameters to the command + environ: + # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/synth_strip_callables.py b/nipype-auto-conv/specs/interfaces/synth_strip_callables.py new file mode 100644 index 0000000..526cf72 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/synth_strip_callables.py @@ -0,0 +1,151 @@ +"""Module to put any functions that are referred to in the "callables" section of SynthStrip.yaml""" + +import attrs +import logging +import os +from nipype import logging +from nipype.utils.filemanip import split_filename +from nipype.interfaces.base.support import NipypeInterfaceError +from nipype.interfaces.base.traits_extension import traits + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +def out_mask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_mask"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + 
source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + 
name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value diff --git a/nipype-auto-conv/specs/interfaces/upload_iq_ms.yaml b/nipype-auto-conv/specs/interfaces/upload_iq_ms.yaml new file mode 100644 index 0000000..3adbf2b --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/upload_iq_ms.yaml @@ -0,0 +1,89 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'mriqc.interfaces.webapi.UploadIQMs' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# +# Upload features to MRIQCWebAPI +# +task_name: UploadIQMs +nipype_name: UploadIQMs +nipype_module: mriqc.interfaces.webapi +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_iqms: dict + # type=file|default=: the input IQMs-JSON file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + api_id: api_id_callable + # type=traitcompound: Id for report returned by the web api + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: + - inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_iqms: + # type=file|default=: the input IQMs-JSON file + endpoint: + # type=str|default='': URL of the POST endpoint + auth_token: + # type=str|default='': authentication token + email: + # type=str|default='': set sender email + strict: + # type=bool|default=False: crash if upload was not successful + imports: + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: [] +find_replace: + - [config\.loggers\.interface, logger] + - ["return runtime", "return api_id"] + - ["messages.QC_UPLOAD_COMPLETE", "'QC metrics successfully uploaded.'"] + - ["messages.QC_UPLOAD_START", "'MRIQC Web API: submitting to <{url}>'"] + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] diff --git a/nipype-auto-conv/specs/interfaces/upload_iq_ms_callables.py b/nipype-auto-conv/specs/interfaces/upload_iq_ms_callables.py new file mode 100644 index 0000000..8bffaf8 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/upload_iq_ms_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of UploadIQMs.yaml""" + + +def api_id_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["api_id"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/weighted_stat.yaml b/nipype-auto-conv/specs/interfaces/weighted_stat.yaml similarity index 92% rename from nipype-auto-conv/specs/weighted_stat.yaml rename to nipype-auto-conv/specs/interfaces/weighted_stat.yaml index b3665f5..5aed7ff 100644 --- a/nipype-auto-conv/specs/weighted_stat.yaml +++ b/nipype-auto-conv/specs/interfaces/weighted_stat.yaml @@ -22,6 +22,9 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: an image + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -55,7 +58,7 @@ tests: stat: # type=enum|default='mean'|allowed['mean','std']: statistic to compute imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/nipype-auto-conv/specs/interfaces/weighted_stat_callables.py b/nipype-auto-conv/specs/interfaces/weighted_stat_callables.py new file mode 100644 index 0000000..f1e00d5 --- /dev/null +++ b/nipype-auto-conv/specs/interfaces/weighted_stat_callables.py @@ -0,0 +1,13 @@ +"""Module to put any functions that are referred to in the "callables" section of WeightedStat.yaml""" + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L568 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return _results diff --git a/nipype-auto-conv/specs/number_of_shells_callables.py b/nipype-auto-conv/specs/number_of_shells_callables.py deleted file mode 100644 index 63378f6..0000000 --- a/nipype-auto-conv/specs/number_of_shells_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in NumberOfShells.yaml""" diff --git a/nipype-auto-conv/specs/package.yaml 
b/nipype-auto-conv/specs/package.yaml new file mode 100644 index 0000000..ab44c0c --- /dev/null +++ b/nipype-auto-conv/specs/package.yaml @@ -0,0 +1,63 @@ +# name of the package to generate, e.g. pydra.tasks.mriqc +name: pydra.tasks.mriqc +# name of the nipype package to generate from (e.g. mriqc) +nipype_name: mriqc +# The name of the global struct/dict that contains workflow inputs that are to be converted to inputs of the function along with the type of the struct, either "dict" or "class" +config_params: + wf: + varname: config.workflow + type: struct + module: mriqc + exec: + varname: config.execution + type: struct + module: mriqc + defaults: + work_dir: Path.cwd() + nipype: + varname: config.nipype + type: struct + module: mriqc + env: + varname: config.environment + type: struct + module: mriqc +omit_functions: + - nipype.external.due.BibTeX +omit_classes: + - niworkflows.interfaces.bids._ReadSidecarJSONOutputSpec + - mriqc.interfaces.diffusion._ReadDWIMetadataOutputSpec +omit_constants: + - nipype.utils.filemanip._cifs_table + - nipype.config + - nipype.logging +# Mappings between nipype packages and their pydra equivalents. Regular expressions are supported +import_translations: + - [nireports, pydra.tasks.nireports] + - [niworkflows, pydra.tasks.niworkflows] +find_replace: + - [config\.loggers\.\w+\., logger.] + - [config.to_filename\(\), ""] + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - ["class _ReadDWIMetadataOutputSpec.+?(?=\\n\\n)", ""] + - ["dataset = wf_inputs\\.get\\(.*?_datalad_get\\(\\w+\\)", ""] + - ["DWIDenoise", "DwiDenoise"] + # - [ + # "dict\\[int, \\(float, float\\)\\]", + # "ty.Dict[int, ty.Tuple[float, float]]", + # ] + # - [ + # "dict\\[str, float \\| np.ndarray\\]", + # "ty.Dict[str, ty.Union[float, np.ndarray]]", + # ] + # - ["\\bdict\\[", "ty.Dict["] +omit_modules: + - "mriqc.config" +import_find_replace: + - ["from \\.\\. 
import config, logging", ""] + - ["_ReadDWIMetadataOutputSpec,", ""] + - ["from pydra.tasks.mriqc.nipype_ports.interfaces import utility as niu", ""] +copy_packages: + - mriqc.data diff --git a/nipype-auto-conv/specs/read_dwi_metadata_callables.py b/nipype-auto-conv/specs/read_dwi_metadata_callables.py deleted file mode 100644 index 197e018..0000000 --- a/nipype-auto-conv/specs/read_dwi_metadata_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in ReadDWIMetadata.yaml""" diff --git a/nipype-auto-conv/specs/rotation_mask_callables.py b/nipype-auto-conv/specs/rotation_mask_callables.py deleted file mode 100644 index c844375..0000000 --- a/nipype-auto-conv/specs/rotation_mask_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in RotationMask.yaml""" diff --git a/nipype-auto-conv/specs/select_echo_callables.py b/nipype-auto-conv/specs/select_echo_callables.py deleted file mode 100644 index cce5cf5..0000000 --- a/nipype-auto-conv/specs/select_echo_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in SelectEcho.yaml""" diff --git a/nipype-auto-conv/specs/spikes_callables.py b/nipype-auto-conv/specs/spikes_callables.py deleted file mode 100644 index b4f0c3b..0000000 --- a/nipype-auto-conv/specs/spikes_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in Spikes.yaml""" diff --git a/nipype-auto-conv/specs/split_shells_callables.py b/nipype-auto-conv/specs/split_shells_callables.py deleted file mode 100644 index ec593c9..0000000 --- a/nipype-auto-conv/specs/split_shells_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in SplitShells.yaml""" diff --git a/nipype-auto-conv/specs/structural_qc.yaml b/nipype-auto-conv/specs/structural_qc.yaml deleted file mode 100644 index bb95b78..0000000 --- a/nipype-auto-conv/specs/structural_qc.yaml +++ /dev/null @@ -1,116 
+0,0 @@ -# This file is used to manually specify the semi-automatic conversion of -# 'mriqc.interfaces.anatomical.StructuralQC' from Nipype to Pydra. -# -# Please fill-in/edit the fields below where appropriate -# -# Docs -# ---- -# -# Computes anatomical :abbr:`QC (Quality Control)` measures on the -# structural image given as input -# -# -task_name: StructuralQC -nipype_name: StructuralQC -nipype_module: mriqc.interfaces.anatomical -inputs: - omit: - # list[str] - fields to omit from the Pydra interface - rename: - # dict[str, str] - fields to rename in the Pydra interface - types: - # dict[str, type] - override inferred types (use "mime-like" string for file-format types, - # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred - # from the nipype interface, but you may want to be more specific, particularly - # for file types, where specifying the format also specifies the file that will be - # passed to the field in the automatically generated unittests. - in_file: generic/file - # type=file|default=: file to be plotted - in_noinu: generic/file - # type=file|default=: image after INU correction - in_segm: generic/file - # type=file|default=: segmentation file from FSL FAST - in_bias: generic/file - # type=file|default=: bias file - head_msk: generic/file - # type=file|default=: head mask - air_msk: generic/file - # type=file|default=: air mask - rot_msk: generic/file - # type=file|default=: rotation mask - artifact_msk: generic/file - # type=file|default=: air mask - in_pvms: generic/file+list-of - # type=inputmultiobject|default=[]: partial volume maps from FSL FAST - in_tpms: generic/file+list-of - # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST - mni_tpms: generic/file+list-of - # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST - metadata: - # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) -outputs: - omit: - # list[str] - fields to omit from the Pydra interface - rename: - # dict[str, str] - fields to rename in the Pydra interface - types: - # dict[str, type] - override inferred types (use "mime-like" string for file-format types, - # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred - # from the nipype interface, but you may want to be more specific, particularly - # for file types, where specifying the format also specifies the file that will be - # passed to the field in the automatically generated unittests. - out_noisefit: generic/file - # type=file: plot of background noise and chi fitting - callables: - # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` - # to set to the `callable` attribute of output fields - templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - requirements: - # dict[str, list[str]] - input fields that are required to be provided for the output field to be present -tests: -- inputs: - # dict[str, str] - values to provide to inputs fields in the task initialisation - # (if not specified, will try to choose a sensible value) - in_file: - # type=file|default=: file to be plotted - in_noinu: - # type=file|default=: image after INU correction - in_segm: - # type=file|default=: segmentation file from FSL FAST - in_bias: - # type=file|default=: bias file - head_msk: - # type=file|default=: head mask - air_msk: - # type=file|default=: air mask - rot_msk: - # type=file|default=: rotation mask - artifact_msk: - # type=file|default=: air mask - in_pvms: - # type=inputmultiobject|default=[]: partial volume maps from FSL FAST - in_tpms: - # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST - mni_tpms: - # type=inputmultiobject|default=[]: tissue probability maps from FSL FAST - in_fwhm: - # type=list|default=[]: smoothness estimated with AFNI - human: - # type=bool|default=True: 
human workflow - imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item - # consisting of 'module', 'name', and optionally 'alias' keys - expected_outputs: - # dict[str, str] - expected values for selected outputs, noting that tests will typically - # be terminated before they complete for time-saving reasons, and therefore - # these values will be ignored, when running in CI - timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised - # successfully. Set to 0 to disable the timeout (warning, this could - # lead to the unittests taking a very long time to complete) - xfail: true - # bool - whether the unittest is expected to fail or not. Set to false - # when you are satisfied with the edits you have made to this file -doctests: [] diff --git a/nipype-auto-conv/specs/structural_qc_callables.py b/nipype-auto-conv/specs/structural_qc_callables.py deleted file mode 100644 index 28d0952..0000000 --- a/nipype-auto-conv/specs/structural_qc_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in StructuralQC.yaml""" diff --git a/nipype-auto-conv/specs/upload_iq_ms_callables.py b/nipype-auto-conv/specs/upload_iq_ms_callables.py deleted file mode 100644 index 6cc7c99..0000000 --- a/nipype-auto-conv/specs/upload_iq_ms_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in UploadIQMs.yaml""" diff --git a/nipype-auto-conv/specs/weighted_stat_callables.py b/nipype-auto-conv/specs/weighted_stat_callables.py deleted file mode 100644 index 3ea363d..0000000 --- a/nipype-auto-conv/specs/weighted_stat_callables.py +++ /dev/null @@ -1 +0,0 @@ -"""Module to put any functions that are referred to in WeightedStat.yaml""" diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.airmsk_wf.yaml 
b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.airmsk_wf.yaml new file mode 100644 index 0000000..d3b72c9 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.airmsk_wf.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: airmsk_wf +# name of the nipype workflow constructor +nipype_name: airmsk_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.anat_qc_workflow.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.anat_qc_workflow.yaml new file mode 100644 index 0000000..62caab2 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.anat_qc_workflow.yaml @@ -0,0 +1,23 @@ +# name of the converted workflow constructor function +name: anat_qc_workflow +# name of the nipype workflow constructor +nipype_name: anat_qc_workflow +# name of the nipype module the function is found within, e.g. 
mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: + - nirodents.workflows.brainextraction.init_rodent_brain_extraction_wf +find_replace: + - [from pydra.tasks.mriqc.messages import BUILDING_WORKFLOW, ""] + - [BUILDING_WORKFLOW, "'Building {modality} MRIQC workflow {detail}.'"] + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - [ + "# fmt: off\\n\\s*workflow.set_output\\(\\[\\('iqmswf_measures', workflow.iqmswf.lzout.measures\\)\\]\\)", + "", + ] diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.compute_iqms.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.compute_iqms.yaml new file mode 100644 index 0000000..078cc79 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.compute_iqms.yaml @@ -0,0 +1,29 @@ +# name of the converted workflow constructor function +name: compute_iqms +# name of the nipype workflow constructor +nipype_name: compute_iqms +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +outputs: + measures: + node_name: measures + field: out_qc + replaces: + - [outputnode, out_file] + noise_report: + node_name: getqi2 + field: out_file + export: true + replaces: + - [outputnode, noisefit] +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.headmsk_wf.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.headmsk_wf.yaml new file mode 100644 index 0000000..12453c0 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.headmsk_wf.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: headmsk_wf +# name of the nipype workflow constructor +nipype_name: headmsk_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.init_brain_tissue_segmentation.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.init_brain_tissue_segmentation.yaml new file mode 100644 index 0000000..48bcb66 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.init_brain_tissue_segmentation.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: init_brain_tissue_segmentation +# name of the nipype workflow constructor +nipype_name: init_brain_tissue_segmentation +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.spatial_normalization.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.spatial_normalization.yaml new file mode 100644 index 0000000..4f7cac1 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.base.spatial_normalization.yaml @@ -0,0 +1,26 @@ +# name of the converted workflow constructor function +name: spatial_normalization +# name of the nipype workflow constructor +nipype_name: spatial_normalization +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +outputs: + report: + node_name: norm + field: out_report + export: true + replaces: + - ["outputnode", "out_report"] +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.output.init_anat_report_wf.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.output.init_anat_report_wf.yaml new file mode 100644 index 0000000..9af5b76 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.anatomical.output.init_anat_report_wf.yaml @@ -0,0 +1,47 @@ +# name of the converted workflow constructor function +name: init_anat_report_wf +# name of the nipype workflow constructor +nipype_name: init_anat_report_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.anatomical.output +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +outputs: + zoom_report: + node_name: mosaic_zoom + field: out_file + export: true + bg_report: + node_name: mosaic_noise + field: out_file + export: true + segm_report: + node_name: plot_segm + field: out_file + export: true + bmask_report: + node_name: plot_bmask + field: out_file + export: true + artmask_report: + node_name: plot_artmask + field: out_file + export: true + airmask_report: + node_name: plot_airmask + field: out_file + export: true + headmask_report: + node_name: plot_headmask + field: out_file + export: true +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - ["if not verbose:\\n\\s*return workflow", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.compute_iqms.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.compute_iqms.yaml new file mode 100644 index 0000000..0c560b8 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.compute_iqms.yaml @@ -0,0 +1,30 @@ +# name of the converted workflow constructor function +name: compute_iqms +# name of the nipype workflow constructor +nipype_name: compute_iqms +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.diffusion.base +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +outputs: + out_file: + node_name: measures + field: out_qc + export: true + replaces: + - [outputnode, out_file] + noise_floor: + node_name: estimate_sigma + field: out + export: true + replaces: + - [outputnode, noise_floor] +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.dmri_qc_workflow.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.dmri_qc_workflow.yaml new file mode 100644 index 0000000..9d094af --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.dmri_qc_workflow.yaml @@ -0,0 +1,44 @@ +# name of the converted workflow constructor function +name: dmri_qc_workflow +# name of the nipype workflow constructor +nipype_name: dmri_qc_workflow +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.diffusion.base +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +inputs: + bvals: + node_name: load_bmat + field: out_bval_file + type: medimage/bval + bvecs: + node_name: load_bmat + field: out_bvec_file + type: medimage/bvec + qspace_neighbors: + node_name: load_bmat + field: qspace_neighbors + # type: field/integer+list-of.list-of +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - [from pydra.tasks.mriqc.messages import BUILDING_WORKFLOW, ""] + - [BUILDING_WORKFLOW, "'Building {modality} MRIQC workflow {detail}.'"] + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - [ + "workflow\\.dwidenoise\\.inputs\\.in_file", + "workflow.dwidenoise.inputs.dwi", + ] + - [ + "in_file=workflow\\.dwidenoise\\.lzout\\.out_file", + "in_file=workflow.dwidenoise.lzout.out", + ] + # - [ + # "workflow.set_output\\(\\n(\\s*)\\[\\(\"dwi_report_wf_spikes_report\", workflow.dwi_report_wf.lzout.spikes_report\\)\\n(\\s*)\\]\\n(\\s*)\\)", + # "if wf_fft_spikes_detector:\\n workflow.set_output(\\n \\1[(\"dwi_report_wf_spikes_report\", workflow.dwi_report_wf.lzout.spikes_report)\\n \\2]\\n \\3)", + # ] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.epi_mni_align.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.epi_mni_align.yaml new file mode 100644 index 0000000..5df89ec --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.epi_mni_align.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: epi_mni_align +# name of the nipype workflow constructor +nipype_name: epi_mni_align +# name of the nipype module the function 
is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.diffusion.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.hmc_workflow.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.hmc_workflow.yaml new file mode 100644 index 0000000..5de2ca9 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.base.hmc_workflow.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: hmc_workflow +# name of the nipype workflow constructor +nipype_name: hmc_workflow +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.diffusion.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.output.init_dwi_report_wf.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.output.init_dwi_report_wf.yaml new file mode 100644 index 0000000..c837d27 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.diffusion.output.init_dwi_report_wf.yaml @@ -0,0 +1,58 @@ +# name of the converted workflow constructor function +name: init_dwi_report_wf +# name of the nipype workflow constructor +nipype_name: init_dwi_report_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.diffusion.output +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +outputs: + snr_report: + node_name: mosaic_snr + field: out_report + export: true + noise_report: + node_name: mosaic_noise + field: out_file + export: true + fa_report: + node_name: mosaic_fa + field: out_file + export: true + md_report: + node_name: mosaic_md + field: out_file + export: true + heatmap_report: + node_name: plot_heatmap + field: out_file + export: true + spikes_report: + node_name: mosaic_spikes + field: out_file + export: true + carpet_report: + node_name: bigplot + field: out_file + export: true + # bg_report: # seems to be the same as the noise report + # node_name: mosaic_noise + # field: out_file + bmask_report: + node_name: plot_bmask + field: out_file + export: true +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - ["out_file=workflow\\.lzin\\.epi_mean,\\n", ""] # multiple connections to out_file in workflow + - ["if True:\\n\\s*return workflow", ""] + - ["if wf_fft_spikes_detector:", "if True: # wf_fft_spikes_detector:"] + - ["if not verbose:", "if False: # not verbose:"] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.compute_iqms.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.compute_iqms.yaml new file mode 100644 index 0000000..84b4660 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.compute_iqms.yaml @@ -0,0 +1,59 @@ +# name of the converted workflow constructor function +name: compute_iqms +# name of the nipype workflow constructor +nipype_name: compute_iqms +# name of the nipype module the 
function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +outputs: + out_file: + node_name: measures + field: out_qc + export: true + replaces: + - [outputnode, out_file] + spikes: + node_name: spikes_fft + field: out_spikes + export: true + replaces: + - ["outputnode", "out_spikes"] + fft: + node_name: spikes_fft + field: out_fft + export: true + replaces: + - ["outputnode", "out_fft"] + spikes_num: + node_name: spikes_fft + field: n_spikes + type: field/integer + export: true + replaces: + - ["outputnode", "spikes_num"] + outliers: + node_name: outliers + field: out_file + export: true + replaces: + - ["outputnode", "outliers"] + dvars: + node_name: dvnode + field: out_all + export: true + replaces: + - ["outputnode", "out_dvars"] +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - [ + "if wf_fft_spikes_detector:", + "if True: # wf_fft_spikes_detector: - disabled to ensure all outputs are generated", + ] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.epi_mni_align.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.epi_mni_align.yaml new file mode 100644 index 0000000..4e1f2ee --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.epi_mni_align.yaml @@ -0,0 +1,36 @@ +# name of the converted workflow constructor function +name: epi_mni_align +# name of the nipype workflow 
constructor +nipype_name: epi_mni_align +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. its inputs will be the outputs of the workflow +output_node: outputnode +outputs: + epi_parc: + node_name: invt + field: output_image + replaces: + - ["outputnode", "epi_parc"] + epi_mni: + node_name: norm + field: warped_image + replaces: + - ["outputnode", "epi_mri"] + report: + node_name: norm + field: mni_report + export: true + replaces: + - ["outputnode", "out_report"] +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_bmsk_workflow.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_bmsk_workflow.yaml new file mode 100644 index 0000000..ff05860 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_bmsk_workflow.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: fmri_bmsk_workflow +# name of the nipype workflow constructor +nipype_name: fmri_bmsk_workflow +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.base +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_qc_workflow.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_qc_workflow.yaml new file mode 100644 index 0000000..d6796e0 --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.fmri_qc_workflow.yaml @@ -0,0 +1,29 @@ +# name of the converted workflow constructor function +name: fmri_qc_workflow +# name of the nipype workflow constructor +nipype_name: fmri_qc_workflow +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.base +# Name of the node that is to be considered the input of the workflow, i.e. 
its outputs will be the inputs of the workflow +input_node: inputnode +inputs: + metadata: + node_name: meta + field: out_dict + type: dict +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - [from pydra.tasks.mriqc.messages import BUILDING_WORKFLOW, ""] + - [BUILDING_WORKFLOW, "'Building {modality} MRIQC workflow {detail}.'"] + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - ["full_files = \\[\\].*?= full_files", ""] + - [ + "# fmt: off\\n\\s*workflow.set_output\\(\\[\\('iqmswf_out_file', workflow.iqmswf.lzout.out_file\\)\\]\\)", + "", + ] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.hmc.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.hmc.yaml new file mode 100644 index 0000000..970269e --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.base.hmc.yaml @@ -0,0 +1,19 @@ +# name of the converted workflow constructor function +name: hmc +# name of the nipype workflow constructor +nipype_name: hmc +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.base +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.output.init_func_report_wf.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.output.init_func_report_wf.yaml new file mode 100644 index 0000000..36c527e --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.functional.output.init_func_report_wf.yaml @@ -0,0 +1,48 @@ +# name of the converted workflow constructor function +name: init_func_report_wf +# name of the nipype workflow constructor +nipype_name: init_func_report_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.functional.output +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +outputs: + mean_report: + node_name: mosaic_mean + field: out_file + export: true + stdev_report: + node_name: mosaic_stddev + field: out_file + export: true + background_report: + node_name: mosaic_noise + field: out_file + export: true + zoomed_report: + node_name: mosaic_zoom + field: out_file + export: true + carpet_report: + node_name: bigplot + field: out_file + export: true + spikes_report: + node_name: mosaic_spikes + field: out_file + export: true + +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] + - [ + "if wf_fft_spikes_detector:", + "if True: # wf_fft_spikes_detector: - disabled so output is always created", + ] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null diff --git a/nipype-auto-conv/specs/workflows/mriqc.workflows.shared.synthstrip_wf.yaml b/nipype-auto-conv/specs/workflows/mriqc.workflows.shared.synthstrip_wf.yaml new file mode 100644 index 0000000..942477a --- /dev/null +++ b/nipype-auto-conv/specs/workflows/mriqc.workflows.shared.synthstrip_wf.yaml @@ -0,0 +1,21 @@ +# name of the converted workflow constructor function +name: synthstrip_wf +# name of the nipype workflow constructor +nipype_name: synthstrip_wf +# name of the nipype module the function is found within, e.g. mriqc.workflows.anatomical.base +nipype_module: mriqc.workflows.shared +# Name of the node that is to be considered the input of the workflow, i.e. its outputs will be the inputs of the workflow +input_node: inputnode +# # Name of the node that is to be considered the output of the workflow, i.e. 
its inputs will be the outputs of the workflow +output_node: outputnode +# Generic regular expression substitutions to be run over the code before it is processed +find_replace: + - ["config = NipypeConfig\\(\\)", ""] + - ["iflogger = logging.getLogger\\(\"nipype.interface\"\\)", ""] + - ["logging = Logging\\(config\\)", ""] +# name of the workflow variable that is returned +workflow_variable: workflow +# the names of the nested workflows that are defined in other modules and need to be imported +external_nested_workflows: null +test_inputs: + omp_nthreads: 1 diff --git a/pydra/tasks/mriqc/__init__.py b/pydra/tasks/mriqc/__init__.py index 4dfb7cd..eda89fd 100644 --- a/pydra/tasks/mriqc/__init__.py +++ b/pydra/tasks/mriqc/__init__.py @@ -3,8 +3,9 @@ imported. >>> import pydra.engine ->>> import pydra.tasks.anatomical +>>> import pydra.tasks.mriqc """ + from warnings import warn from pathlib import Path @@ -14,24 +15,20 @@ from ._version import __version__ except ImportError: raise RuntimeError( - "pydra-anatomical has not been properly installed, please run " + "pydra-mriqc has not been properly installed, please run " f"`pip install -e {str(pkg_path)}` to install a development version" ) -if "nipype" not in __version__: +if "post" not in __version__: try: - from .auto._version import nipype_version, nipype2pydra_version + from ._post_release import post_release except ImportError: warn( "Nipype interfaces haven't been automatically converted from their specs in " f"`nipype-auto-conv`. 
Please run `{str(pkg_path / 'nipype-auto-conv' / 'generate')}` " - "to generated the converted Nipype interfaces in pydra.tasks.anatomical.auto" + "to generated the converted Nipype interfaces in pydra.tasks.mriqc" ) else: - n_ver = nipype_version.replace(".", "_") - n2p_ver = nipype2pydra_version.replace(".", "_") - __version__ += ( - "_" if "+" in __version__ else "+" - ) + f"nipype{n_ver}_nipype2pydra{n2p_ver}" + __version__ += "post" + post_release __all__ = ["__version__"] diff --git a/pydra/tasks/mriqc/latest.py b/pydra/tasks/mriqc/latest.py deleted file mode 100644 index f41e057..0000000 --- a/pydra/tasks/mriqc/latest.py +++ /dev/null @@ -1,3 +0,0 @@ -PACKAGE_VERSION = "v1" - -from .v1 import * # noqa diff --git a/pydra/tasks/mriqc/workflows/__init__.py b/pydra/tasks/mriqc/workflows/__init__.py deleted file mode 100644 index 36ff3a6..0000000 --- a/pydra/tasks/mriqc/workflows/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" -.. automodule:: mriqc.workflows.anatomical - :members: - :undoc-members: - :show-inheritance: - - -.. 
automodule:: mriqc.workflows.functional - :members: - :undoc-members: - :show-inheritance: - -""" -from mriqc.workflows.anatomical.base import anat_qc_workflow -from mriqc.workflows.functional.base import fmri_qc_workflow - -__all__ = [ - "anat_qc_workflow", - "fmri_qc_workflow", -] diff --git a/pydra/tasks/mriqc/workflows/anatomical/__init__.py b/pydra/tasks/mriqc/workflows/anatomical/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/pydra/tasks/mriqc/workflows/anatomical/base.py b/pydra/tasks/mriqc/workflows/anatomical/base.py deleted file mode 100644 index 030e8e3..0000000 --- a/pydra/tasks/mriqc/workflows/anatomical/base.py +++ /dev/null @@ -1,1159 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" -Anatomical workflow -=================== - -.. image :: _static/anatomical_workflow_source.svg - -The anatomical workflow follows the following steps: - -#. Conform (reorientations, revise data types) input data and read - associated metadata. -#. Skull-stripping (AFNI). -#. Calculate head mask -- :py:func:`headmsk_wf`. -#. Spatial Normalization to MNI (ANTs) -#. 
Calculate air mask above the nasial-cerebelum plane -- :py:func:`airmsk_wf`. -#. Brain tissue segmentation (FAST). -#. Extraction of IQMs -- :py:func:`compute_iqms`. -#. Individual-reports generation -- - :py:func:`~mriqc.workflows.anatomical.output.init_anat_report_wf`. - -This workflow is orchestrated by :py:func:`anat_qc_workflow`. - -For the skull-stripping, we use ``afni_wf`` from ``niworkflows.anat.skullstrip``: - -.. workflow:: - - from niworkflows.anat.skullstrip import afni_wf - from mriqc.testing import mock_config - with mock_config(): - wf = afni_wf() -""" -import pydra -from pydra import Workflow -from pydra.tasks.mriqc.auto import ( - UploadIQMs, - ArtifactMask, - ComputeQI2, - ConformImage, - IQMFileSink, - RotationMask, - StructuralQC, -) -from mriqc.messages import BUILDING_WORKFLOW - -from mriqc import config - -# from mriqc.interfaces.reports import AddProvenance -# from mriqc.interfaces.datalad import DataladIdentityInterface -from mriqc.messages import BUILDING_WORKFLOW -from pydra.tasks.mriqc.workflows.utils import get_fwhmx -from pydra.tasks.mriqc.workflows.anatomical.output import init_anat_report_wf - - -# from nipype.interfaces import utility as niu -# from nipype.pipeline import engine as pe - -from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms -from templateflow.api import get as get_template - - -def anat_qc_workflow(modality, name="anatMRIQC"): - """ - One-subject-one-session-one-run pipeline to extract the NR-IQMs from - anatomical images - - .. 
workflow:: - - import os.path as op - from mriqc.workflows.anatomical.base import anat_qc_workflow - from mriqc.testing import mock_config - with mock_config(): - wf = anat_qc_workflow() - - """ - from pydra.tasks.mriqc.workflows.shared import synthstrip_wf - - dataset = config.workflow.inputs.get("t1w", []) + config.workflow.inputs.get( - "t2w", [] - ) - - message = BUILDING_WORKFLOW.format( - modality="anatomical", - detail=( - f"for {len(dataset)} NIfTI files." - if len(dataset) > 2 - else f"({' and '.join(('<%s>' % v for v in dataset))})." - ), - ) - config.loggers.workflow.info(message) - - # Initialize workflow - workflow = Workflow( - name=name, input_spec=["in_file"] - ) # specifying `input_spec` to contain ["in_file"] makes a field accessible at workflow.lzin.in_file - - # 1. Reorient anatomical image - # to_ras = pe.Node(ConformImage(check_dtype=False), name="conform") - workflow.add( - ConformImage(in_file=workflow.lzin.in_file, check_dtype=False, name="to_ras") - ) - - # 2. species specific skull-stripping - # if config.workflow.species.lower() == "human": - workflow.add( - synthstrip_wf( - omp_nthreads=config.nipype.omp_nthreads, - in_files=workflow.to_ras.lzout.out_file, - name="skull_stripping", - ) - ) - ss_bias_field = "bias_image" - # else: - # from nirodents.workflows.brainextraction import init_rodent_brain_extraction_wf - - # skull_stripping = init_rodent_brain_extraction_wf(template_id=config.workflow.template_id) - # ss_bias_field = "final_n4.bias_image" - - # 3. Head mask - workflow.add( - headmsk_wf( - in_file=workflow.skull_stripping.lzout.out_corrected, - brain_mask=workflow.skull_stripping.lzout.out_mask, - omp_nthreads=config.nipype.omp_nthreads, - name="hmsk", - ) - ) - # 4. Spatial Normalization, using ANTs - workflow.add( - spatial_normalization( - **{modality: workflow.lzin.in_file}, - moving_image=workflow.skull_stripping.lzout.outcorrected, - moving_mask=workflow.skullstripping.lzout.out_mask, - name="norm", - ) - ) - # 5. 
Air mask (with and without artifacts) - workflow.add(airmsk_wf(ind2std_xfm=workflow.norm.lzout.ind2std_xfm, name="amw")) - # 6. Brain tissue segmentation - workflow.add( - init_brain_tissue_segmentation( - brainmask=workflow.skull_stripping.lzout.out_mask, - std_tpms=workflow.norm.lzout.out_tpms, - name="bts", - ) - ) - - # 7. Compute IQMs - workflow.add( - compute_iqms( - in_file=workflow.lzin.in_file, - std_tpms=workflow.norm.lzout.out_tpms, - name="iqmswf", - ) - ) - - # Reports - workflow.add( - init_anat_report_wf( - mni_report=workflow.norm.lzout.out_report, name="anat_report_wf" - ) - ) - - # Connect all nodes - # fmt: off - workflow.add_connections([ - (norm, hmsk, [("outputnode.out_tpms", "inputnode.in_tpms")]), - (to_ras, amw, [("out_file", "inputnode.in_file")]), - (skull_stripping, amw, [("outputnode.out_mask", "inputnode.in_mask")]), - (hmsk, amw, [("outputnode.out_file", "inputnode.head_mask")]), - (to_ras, iqmswf, [("out_file", "inputnode.in_ras")]), - (skull_stripping, iqmswf, [("outputnode.out_corrected", "inputnode.inu_corrected"), - (ss_bias_field, "inputnode.in_inu"), - ("outputnode.out_mask", "inputnode.brainmask")]), - (amw, iqmswf, [("outputnode.air_mask", "inputnode.airmask"), - ("outputnode.hat_mask", "inputnode.hatmask"), - ("outputnode.art_mask", "inputnode.artmask"), - ("outputnode.rot_mask", "inputnode.rotmask")]), - (hmsk, bts, [("outputnode.out_denoised", "inputnode.in_file")]), - (bts, iqmswf, [("outputnode.out_segm", "inputnode.segmentation"), - ("outputnode.out_pvms", "inputnode.pvms")]), - (hmsk, iqmswf, [("outputnode.out_file", "inputnode.headmask")]), - (to_ras, anat_report_wf, [("out_file", "inputnode.in_ras")]), - (skull_stripping, anat_report_wf, [ - ("outputnode.out_corrected", "inputnode.inu_corrected"), - ("outputnode.out_mask", "inputnode.brainmask")]), - (hmsk, anat_report_wf, [("outputnode.out_file", "inputnode.headmask")]), - (amw, anat_report_wf, [ - ("outputnode.air_mask", "inputnode.airmask"), - 
("outputnode.art_mask", "inputnode.artmask"), - ("outputnode.rot_mask", "inputnode.rotmask"), - ]), - (bts, anat_report_wf, [("outputnode.out_segm", "inputnode.segmentation")]), - (iqmswf, anat_report_wf, [("outputnode.noisefit", "inputnode.noisefit")]), - (iqmswf, anat_report_wf, [("outputnode.out_file", "inputnode.in_iqms")]), - (iqmswf, outputnode, [("outputnode.out_file", "out_json")]), - ]) - # fmt: on - - # # Upload metrics - # @pydra.mark.task - # def upload_iqms(in_iqms, endpoint, auth_token, strict): - # from mriqc.interfaces.webapi import UploadIQMs - - # upldwf = UploadIQMs( - # in_iqms=in_iqms, endpoint=endpoint, auth_token=auth_token, strict=strict - # ) - # return upldwf.api_id - - # # fmt: off - # @pydra.mark.task - # def upload_metrics(endpoint, auth_token, strict, in_iqms): - # upload_iqms_result = upload_iqms(in_iqms=in_iqms, endpoint=endpoint, auth_token=auth_token, strict=strict) - # return upload_iqms_result - - # returns the result - workflow.add( - UploadIQMs( - endpoint=config.execution.webapi_url, - auth_token=config.execution.webapi_token, - strict=config.execution.upload_strict, - in_iqms=workflow.iqmswf.lzout.outputnode.out_file, - name="upldwf", - ) - ) - # workflow.ad_connections( - # [ - # (iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]), - # (upldwf, anat_report_wf, [("api_id", "inputnode.api_id")]), - # ] - # ) - - # # Original Code - # if not config.execution.no_sub: - # from mriqc.interfaces.webapi import UploadIQMs - - # upldwf = pe.Node( - # UploadIQMs( - # endpoint=config.execution.webapi_url, - # auth_token=config.execution.webapi_token, - # strict=config.execution.upload_strict, - # ), - # name="UploadMetrics", - # ) - - # # fmt: off - # workflow.ad_connections([ - # (iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]), - # (upldwf, anat_report_wf, [("api_id", "inputnode.api_id")]), - # ]) - # # fmt: on - - return workflow - - -def spatial_normalization(name="SpatialNormalization"): - """Create a simplified workflow 
to perform fast spatial normalization.""" - from niworkflows.interfaces.reportlets.registration import ( - SpatialNormalizationRPT as RobustMNINormalization, - ) - - # Have the template id handy - tpl_id = config.workflow.template_id - - # Define workflow interface - # workflow = pe.Workflow(name=name) - workflow = Workflow(name=name, input_spec=input_spec) - - # # Define input and output nodes - # inputnode = Node(interface=input_spec, name="inputnode") - - # # inputnode = pe.Node( - # # niu.IdentityInterface(fields=["moving_image", "moving_mask", "modality"]), - # # name="inputnode", - # # ) - # outputnode = Node(interface=output_spec, name="outputnode") - - # outputnode = pe.Node( - # niu.IdentityInterface(fields=["out_tpms", "out_report", "ind2std_xfm"]), - # name="outputnode", - # ) - - # # Spatial normalization - # @pydra.mark.task - # def spatial_normalization( - # flavor, - # num_threads, - # ants_float, - # template, - # generate_report, - # species, - # tpl_id, - # ): - - # no pe.node here - workflow.add( - RobustMNINormalization( - flavor=["testing", "fast"][config.execution.debug], - num_threads=config.nipype.omp_nthreads, - float=config.execution.ants_float, - template=tpl_id, - generate_report=True, - name="SpatialNormalization", - # Request all MultiProc processes when ants_nthreads > n_procs - num_threads=config.nipype.omp_nthreads, - mem_gb=3, - ) - ) - if config.workflow.species.lower() == "human": - norm.inputs.reference_mask = str( - get_template(tpl_id, resolution=2, desc="brain", suffix="mask") - ) - else: - norm.inputs.reference_image = str(get_template(tpl_id, suffix="T2w")) - norm.inputs.reference_mask = str( - get_template(tpl_id, desc="brain", suffix="mask")[0] - ) - - return workflow - - -# Create a Pydra workflow -wf = Workflow(name="SpatialNormalizationWorkflow") - -# Define input parameters -flavor = ["testing", "fast"][config.execution.debug] -num_threads = config.nipype.omp_nthreads -ants_float = config.execution.ants_float 
-template = tpl_id -generate_report = True -species = config.workflow.species.lower() -tpl_id = "your_template_id_here" # Replace with actual value - -# Add the spatial normalization task to the workflow -wf.add( - spatial_normalization( - flavor=flavor, - num_threads=num_threads, - ants_float=ants_float, - template=template, - generate_report=generate_report, - species=species, - tpl_id=tpl_id, - ), - name="SpatialNormalization", - num_threads=config.nipype.omp_nthreads, - mem_gb=3, -) - -# Execute the workflow -with pydra.Submitter(plugin="cf") as sub: - wf(submitter=sub) - - #### up to here (20/03/2024) - - # Draft conversion - - import pydra - - -@pydra.mark.task -def project_tpm_to_t1w_space(template_id, species, workflow_name): - from niworkflows.interfaces.ants import ApplyTransforms - from mriqc.utils.misc import get_template - - if species.lower() == "human": - resolution = 1 - else: - resolution = None - - tpms_std2t1w = ApplyTransforms( - dimension=3, - default_value=0, - interpolation="Gaussian", - float=config.execution.ants_float, - ) - tpms_std2t1w.inputs.input_image = [ - str(p) - for p in get_template( - template_id, - suffix="probseg", - resolution=resolution, - label=["CSF", "GM", "WM"], - ) - ] - return tpms_std2t1w - - -@pydra.mark.task -def project_segmentation_to_t1w_space(template_id, species, workflow_name): - from niworkflows.interfaces.ants import ApplyTransforms - from mriqc.utils.misc import get_template - - if species.lower() == "human": - resolution = 1 - else: - resolution = None - - tpms_std2t1w = ApplyTransforms( - dimension=3, - default_value=0, - interpolation="Linear", - float=config.execution.ants_float, - ) - tpms_std2t1w.inputs.input_image = [ - str(p) - for p in get_template( - template_id, - suffix="probseg", - resolution=resolution, - label=["CSF", "GM", "WM"], - ) - ] - return tpms_std2t1w - - -@pydra.mark.task -def init_brain_tissue_segmentation(template_id, species, workflow_name): - from nipype.interfaces.ants import 
Atropos - - workflow = pydra.Workflow(name=workflow_name) - - format_tpm_names = pydra.Node( - name="format_tpm_names", - function=_format_tpm_names, - input_names=["in_files"], - output_names=["file_format"], - iterfield=["in_files"], - ) - format_tpm_names.inputs.in_files = get_template( - template_id, - suffix="probseg", - resolution=(1 if species.lower() == "human" else None), - label=["CSF", "GM", "WM"], - ) - - segment = pydra.Node( - Atropos( - initialization="PriorProbabilityImages", - number_of_tissue_classes=3, - prior_weighting=0.1, - mrf_radius=[1, 1, 1], - mrf_smoothing_factor=0.01, - save_posteriors=True, - out_classified_image_name="segment.nii.gz", - output_posteriors_name_template="segment_%02d.nii.gz", - num_threads=config.nipype.omp_nthreads, - ), - name="segmentation", - ) - - workflow.add(format_tpm_names) - workflow.add(segment) - workflow.connect(format_tpm_names, "file_format", segment, "prior_image") - - return workflow - - -@pydra.mark.task -def compute_iqms(template_id, species, workflow_name): - from niworkflows.interfaces.bids import ReadSidecarJSON - from mriqc.interfaces.anatomical import Harmonize - from mriqc.workflows.utils import _tofloat - from niworkflows.interfaces.bids import ReadSidecarJSON - from mriqc.interfaces.utils import AddProvenance - from mriqc.interfaces.anatomical import ComputeQI2 - from mriqc.workflows.utils import _pop - - inputnode = pydra.Input( - name="inputnode", - spec=[ - "in_file", - "brainmask", - "airmask", - "artmask", - "headmask", - "rotmask", - "hatmask", - "segmentation", - "inu_corrected", - "in_inu", - "pvms", - "metadata", - "std_tpms", - ], - ) - outputnode = pydra.Output(name="outputnode", spec=["out_file", "noisefit"]) - - meta = pydra.Node( - ReadSidecarJSON(index_db=config.execution.bids_database_dir), name="metadata" - ) - - addprov = pydra.Node( - AddProvenance(), name="provenance", run_without_submitting=True - ) - - getqi2 = pydra.Node(ComputeQI2(), name="ComputeQI2") - - measures = 
pydra.Node(StructuralQC(human=species.lower() == "human"), "measures") - - datasink = pydra.Node( - IQMFileSink( - out_dir=config.execution.output_dir, - dataset=config.execution.dsname, - ), - name="datasink", - run_without_submitting=True, - ) - - workflow = pydra.Workflow(name=workflow_name) - workflow.add(meta, addprov, getqi2, measures, datasink) - - return workflow - - -@pydra.mark.task -def headmsk_wf(name="HeadMaskWorkflow", omp_nthreads=1): - from niworkflows.interfaces.nibabel import ApplyMask - - inputnode = pydra.Input(name="inputnode", spec=["in_file", "brainmask", "in_tpms"]) - outputnode = pydra.Output(name="outputnode", spec=["out_file", "out_denoised"]) - - enhance = pydra.Node( - niu.Function( - input_names=["in_file", "wm_tpm"], - output_names=["out_file"], - function=_enhance, - ), - name="Enhance", - num_threads=omp_nthreads, - ) - - gradient = pydra.Node( - niu.Function( - input_names=["in_file", "brainmask", "sigma"], - output_names=["out_file"], - function=image_gradient, - ), - name="Grad", - num_threads=omp_nthreads, - ) - - thresh = pydra.Node( - niu.Function( - input_names=["in_file", "brainmask", "aniso", "thresh"], - output_names=["out_file"], - function=gradient_threshold, - ), - name="GradientThreshold", - num_threads=omp_nthreads, - ) - - apply_mask = pydra.Node(ApplyMask(), name="apply_mask") - - workflow = pydra.Workflow(name=name) - workflow.add(enhance, gradient, thresh, apply_mask) - - return workflow - # end of draft conversion - - # Project standard TPMs into T1w space - tpms_std2t1w = pe.MapNode( - ApplyTransforms( - dimension=3, - default_value=0, - interpolation="Gaussian", - float=config.execution.ants_float, - ), - iterfield=["input_image"], - name="tpms_std2t1w", - ) - tpms_std2t1w.inputs.input_image = [ - str(p) - for p in get_template( - config.workflow.template_id, - suffix="probseg", - resolution=(1 if config.workflow.species.lower() == "human" else None), - label=["CSF", "GM", "WM"], - ) - ] - - # Project MNI 
segmentation to T1 space - tpms_std2t1w = pe.MapNode( - ApplyTransforms( - dimension=3, - default_value=0, - interpolation="Linear", - float=config.execution.ants_float, - ), - iterfield=["input_image"], - name="tpms_std2t1w", - ) - tpms_std2t1w.inputs.input_image = [ - str(p) - for p in get_template( - config.workflow.template_id, - suffix="probseg", - resolution=(1 if config.workflow.species.lower() == "human" else None), - label=["CSF", "GM", "WM"], - ) - ] - - # fmt: off - workflow.connect([ - (inputnode, norm, [("moving_image", "moving_image"), - ("moving_mask", "moving_mask"), - ("modality", "reference")]), - (inputnode, tpms_std2t1w, [("moving_image", "reference_image")]), - (norm, tpms_std2t1w, [ - ("inverse_composite_transform", "transforms"), - ]), - (norm, outputnode, [ - ("composite_transform", "ind2std_xfm"), - ("out_report", "out_report"), - ]), - (tpms_std2t1w, outputnode, [("output_image", "out_tpms")]), - ]) - # fmt: on - - return workflow - - -def init_brain_tissue_segmentation(name="brain_tissue_segmentation"): - """ - Setup a workflow for brain tissue segmentation. - - .. 
workflow:: - - from mriqc.workflows.anatomical.base import init_brain_tissue_segmentation - from mriqc.testing import mock_config - with mock_config(): - wf = init_brain_tissue_segmentation() - - """ - from nipype.interfaces.ants import Atropos - - def _format_tpm_names(in_files, fname_string=None): - from pathlib import Path - import nibabel as nb - import glob - - out_path = Path.cwd().absolute() - - # copy files to cwd and rename iteratively - for count, fname in enumerate(in_files): - img = nb.load(fname) - extension = "".join(Path(fname).suffixes) - out_fname = f"priors_{1 + count:02}{extension}" - nb.save(img, Path(out_path, out_fname)) - - if fname_string is None: - fname_string = f"priors_%02d{extension}" - - out_files = [ - str(prior) - for prior in glob.glob(str(Path(out_path, f"priors*{extension}"))) - ] - - # return path with c-style format string for Atropos - file_format = str(Path(out_path, fname_string)) - return file_format, out_files - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_file", "brainmask", "std_tpms"]), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["out_segm", "out_pvms"]), - name="outputnode", - ) - - format_tpm_names = pe.Node( - niu.Function( - input_names=["in_files"], - output_names=["file_format"], - function=_format_tpm_names, - execution={"keep_inputs": True, "remove_unnecessary_outputs": False}, - ), - name="format_tpm_names", - ) - - segment = pe.Node( - Atropos( - initialization="PriorProbabilityImages", - number_of_tissue_classes=3, - prior_weighting=0.1, - mrf_radius=[1, 1, 1], - mrf_smoothing_factor=0.01, - save_posteriors=True, - out_classified_image_name="segment.nii.gz", - output_posteriors_name_template="segment_%02d.nii.gz", - num_threads=config.nipype.omp_nthreads, - ), - name="segmentation", - mem_gb=5, - num_threads=config.nipype.omp_nthreads, - ) - - # fmt: off - workflow.connect([ - (inputnode, segment, [("in_file", 
"intensity_images"), - ("brainmask", "mask_image")]), - (inputnode, format_tpm_names, [('std_tpms', 'in_files')]), - (format_tpm_names, segment, [(('file_format', _pop), 'prior_image')]), - (segment, outputnode, [("classified_image", "out_segm"), - ("posteriors", "out_pvms")]), - ]) - # fmt: on - return workflow - - -def compute_iqms(name="ComputeIQMs"): - """ - Setup the workflow that actually computes the IQMs. - - .. workflow:: - - from mriqc.workflows.anatomical.base import compute_iqms - from mriqc.testing import mock_config - with mock_config(): - wf = compute_iqms() - - """ - from niworkflows.interfaces.bids import ReadSidecarJSON - - from mriqc.interfaces.anatomical import Harmonize - from mriqc.workflows.utils import _tofloat - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_file", - "in_ras", - "brainmask", - "airmask", - "artmask", - "headmask", - "rotmask", - "hatmask", - "segmentation", - "inu_corrected", - "in_inu", - "pvms", - "metadata", - "std_tpms", - ] - ), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["out_file", "noisefit"]), - name="outputnode", - ) - - # Extract metadata - meta = pe.Node( - ReadSidecarJSON(index_db=config.execution.bids_database_dir), name="metadata" - ) - - # Add provenance - addprov = pe.Node(AddProvenance(), name="provenance", run_without_submitting=True) - - # AFNI check smoothing - fwhm_interface = get_fwhmx() - - fwhm = pe.Node(fwhm_interface, name="smoothness") - - # Harmonize - homog = pe.Node(Harmonize(), name="harmonize") - if config.workflow.species.lower() != "human": - homog.inputs.erodemsk = False - homog.inputs.thresh = 0.8 - - # Mortamet's QI2 - getqi2 = pe.Node(ComputeQI2(), name="ComputeQI2") - - # Compute python-coded measures - measures = pe.Node( - StructuralQC(human=config.workflow.species.lower() == "human"), "measures" - ) - - datasink = pe.Node( - IQMFileSink( - out_dir=config.execution.output_dir, - 
dataset=config.execution.dsname, - ), - name="datasink", - run_without_submitting=True, - ) - - def _getwm(inlist): - return inlist[-1] - - # fmt: off - workflow.connect([ - (inputnode, meta, [("in_file", "in_file")]), - (inputnode, datasink, [("in_file", "in_file"), - (("in_file", _get_mod), "modality")]), - (inputnode, addprov, [(("in_file", _get_mod), "modality")]), - (meta, datasink, [("subject", "subject_id"), - ("session", "session_id"), - ("task", "task_id"), - ("acquisition", "acq_id"), - ("reconstruction", "rec_id"), - ("run", "run_id"), - ("out_dict", "metadata")]), - (inputnode, addprov, [("in_file", "in_file"), - ("airmask", "air_msk"), - ("rotmask", "rot_msk")]), - (inputnode, getqi2, [("in_ras", "in_file"), - ("hatmask", "air_msk")]), - (inputnode, homog, [("inu_corrected", "in_file"), - (("pvms", _getwm), "wm_mask")]), - (inputnode, measures, [("in_inu", "in_bias"), - ("in_ras", "in_file"), - ("airmask", "air_msk"), - ("headmask", "head_msk"), - ("artmask", "artifact_msk"), - ("rotmask", "rot_msk"), - ("segmentation", "in_segm"), - ("pvms", "in_pvms"), - ("std_tpms", "mni_tpms")]), - (inputnode, fwhm, [("in_ras", "in_file"), - ("brainmask", "mask")]), - (homog, measures, [("out_file", "in_noinu")]), - (fwhm, measures, [(("fwhm", _tofloat), "in_fwhm")]), - (measures, datasink, [("out_qc", "root")]), - (addprov, datasink, [("out_prov", "provenance")]), - (getqi2, datasink, [("qi2", "qi_2")]), - (getqi2, outputnode, [("out_file", "noisefit")]), - (datasink, outputnode, [("out_file", "out_file")]), - ]) - # fmt: on - - return workflow - - -def headmsk_wf(name="HeadMaskWorkflow", omp_nthreads=1): - """ - Computes a head mask as in [Mortamet2009]_. - - .. 
workflow:: - - from mriqc.testing import mock_config - from mriqc.workflows.anatomical.base import headmsk_wf - with mock_config(): - wf = headmsk_wf() - - """ - - from niworkflows.interfaces.nibabel import ApplyMask - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_file", "brainmask", "in_tpms"]), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["out_file", "out_denoised"]), name="outputnode" - ) - - def _select_wm(inlist): - return [f for f in inlist if "WM" in f][0] - - enhance = pe.Node( - niu.Function( - input_names=["in_file", "wm_tpm"], - output_names=["out_file"], - function=_enhance, - ), - name="Enhance", - num_threads=omp_nthreads, - ) - - gradient = pe.Node( - niu.Function( - input_names=["in_file", "brainmask", "sigma"], - output_names=["out_file"], - function=image_gradient, - ), - name="Grad", - num_threads=omp_nthreads, - ) - thresh = pe.Node( - niu.Function( - input_names=["in_file", "brainmask", "aniso", "thresh"], - output_names=["out_file"], - function=gradient_threshold, - ), - name="GradientThreshold", - num_threads=omp_nthreads, - ) - if config.workflow.species != "human": - gradient.inputs.sigma = 3.0 - thresh.inputs.aniso = True - thresh.inputs.thresh = 4.0 - - apply_mask = pe.Node(ApplyMask(), name="apply_mask") - - # fmt: off - workflow.connect([ - (inputnode, enhance, [("in_file", "in_file"), - (("in_tpms", _select_wm), "wm_tpm")]), - (inputnode, thresh, [("brainmask", "brainmask")]), - (inputnode, gradient, [("brainmask", "brainmask")]), - (inputnode, apply_mask, [("brainmask", "in_mask")]), - (enhance, gradient, [("out_file", "in_file")]), - (gradient, thresh, [("out_file", "in_file")]), - (enhance, apply_mask, [("out_file", "in_file")]), - (thresh, outputnode, [("out_file", "out_file")]), - (apply_mask, outputnode, [("out_file", "out_denoised")]), - ]) - # fmt: on - - return workflow - - -def airmsk_wf(name="AirMaskWorkflow"): - """ - Calculate air, 
artifacts and "hat" masks to evaluate noise in the background. - - This workflow mostly addresses the implementation of Step 1 in [Mortamet2009]_. - This work proposes to look at the signal distribution in the background, where - no signals are expected, to evaluate the spread of the noise. - It is in the background where [Mortamet2009]_ proposed to also look at the presence - of ghosts and artifacts, where they are very easy to isolate. - - However, [Mortamet2009]_ proposes not to look at the background around the face - because of the likely signal leakage through the phase-encoding axis sourcing from - eyeballs (and their motion). - To avoid that, [Mortamet2009]_ proposed atlas-based identification of two landmarks - (nasion and cerebellar projection on to the occipital bone). - MRIQC, for simplicity, used a such a mask created in MNI152NLin2009cAsym space and - projected it on to the individual. - Such a solution is inadequate because it doesn't drop full in-plane slices as there - will be a large rotation of the individual's tilt of the head with respect to the - template. - The new implementation (23.1.x series) follows [Mortamet2009]_ more closely, - projecting the two landmarks from the template space and leveraging - *NiTransforms* to do that. - - .. 
workflow:: - - from mriqc.testing import mock_config - from mriqc.workflows.anatomical.base import airmsk_wf - with mock_config(): - wf = airmsk_wf() - - """ - workflow = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_file", - "in_mask", - "head_mask", - "ind2std_xfm", - ] - ), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["hat_mask", "air_mask", "art_mask", "rot_mask"]), - name="outputnode", - ) - - rotmsk = pe.Node(RotationMask(), name="RotationMask") - qi1 = pe.Node(ArtifactMask(), name="ArtifactMask") - - # fmt: off - workflow.connect([ - (inputnode, rotmsk, [("in_file", "in_file")]), - (inputnode, qi1, [("in_file", "in_file"), - ("head_mask", "head_mask"), - ("ind2std_xfm", "ind2std_xfm")]), - (qi1, outputnode, [("out_hat_msk", "hat_mask"), - ("out_air_msk", "air_mask"), - ("out_art_msk", "art_mask")]), - (rotmsk, outputnode, [("out_file", "rot_mask")]) - ]) - # fmt: on - - return workflow - - -def _binarize(in_file, threshold=0.5, out_file=None): - import os.path as op - - import nibabel as nb - import numpy as np - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath(f"{fname}_bin{ext}") - - nii = nb.load(in_file) - data = nii.get_fdata() > threshold - - hdr = nii.header.copy() - hdr.set_data_dtype(np.uint8) - nb.Nifti1Image(data.astype(np.uint8), nii.affine, hdr).to_filename(out_file) - return out_file - - -def _enhance(in_file, wm_tpm, out_file=None): - import numpy as np - import nibabel as nb - from mriqc.workflows.utils import generate_filename - - imnii = nb.load(in_file) - data = imnii.get_fdata(dtype=np.float32) - range_max = np.percentile(data[data > 0], 99.98) - excess = data > range_max - - wm_prob = nb.load(wm_tpm).get_fdata() - wm_prob[wm_prob < 0] = 0 # Ensure no negative values - wm_prob[excess] = 0 # Ensure no outliers are considered - - # Calculate 
weighted mean and standard deviation - wm_mu = np.average(data, weights=wm_prob) - wm_sigma = np.sqrt(np.average((data - wm_mu) ** 2, weights=wm_prob)) - - # Resample signal excess pixels - data[excess] = np.random.normal(loc=wm_mu, scale=wm_sigma, size=excess.sum()) - - out_file = out_file or str(generate_filename(in_file, suffix="enhanced").absolute()) - nb.Nifti1Image(data, imnii.affine, imnii.header).to_filename(out_file) - return out_file - - -def image_gradient(in_file, brainmask, sigma=4.0, out_file=None): - """Computes the magnitude gradient of an image using numpy""" - import nibabel as nb - import numpy as np - from scipy.ndimage import gaussian_gradient_magnitude as gradient - from mriqc.workflows.utils import generate_filename - - imnii = nb.load(in_file) - mask = np.bool_(nb.load(brainmask).dataobj) - data = imnii.get_fdata(dtype=np.float32) - datamax = np.percentile(data.reshape(-1), 99.5) - data *= 100 / datamax - data[mask] = 100 - - zooms = np.array(imnii.header.get_zooms()[:3]) - sigma_xyz = 2 - zooms / min(zooms) - grad = gradient(data, sigma * sigma_xyz) - gradmax = np.percentile(grad.reshape(-1), 99.5) - grad *= 100.0 - grad /= gradmax - grad[mask] = 100 - - out_file = out_file or str(generate_filename(in_file, suffix="grad").absolute()) - nb.Nifti1Image(grad, imnii.affine, imnii.header).to_filename(out_file) - return out_file - - -def gradient_threshold(in_file, brainmask, thresh=15.0, out_file=None, aniso=False): - """Compute a threshold from the histogram of the magnitude gradient image""" - import nibabel as nb - import numpy as np - from scipy import ndimage as sim - from mriqc.workflows.utils import generate_filename - - if not aniso: - struct = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2) - else: - # Generate an anisotropic binary structure, taking into account slice thickness - img = nb.load(in_file) - zooms = img.header.get_zooms() - dist = max(zooms) - dim = img.header["dim"][0] - - x = np.ones((5) * np.ones(dim, 
dtype=np.int8)) - np.put(x, x.size // 2, 0) - dist_matrix = np.round(sim.distance_transform_edt(x, sampling=zooms), 5) - struct = dist_matrix <= dist - - imnii = nb.load(in_file) - - hdr = imnii.header.copy() - hdr.set_data_dtype(np.uint8) - - data = imnii.get_fdata(dtype=np.float32) - - mask = np.zeros_like(data, dtype=np.uint8) - mask[data > thresh] = 1 - mask = sim.binary_closing(mask, struct, iterations=2).astype(np.uint8) - mask = sim.binary_erosion(mask, sim.generate_binary_structure(3, 2)).astype( - np.uint8 - ) - - segdata = np.asanyarray(nb.load(brainmask).dataobj) > 0 - segdata = sim.binary_dilation(segdata, struct, iterations=2, border_value=1).astype( - np.uint8 - ) - mask[segdata] = 1 - - # Remove small objects - label_im, nb_labels = sim.label(mask) - artmsk = np.zeros_like(mask) - if nb_labels > 2: - sizes = sim.sum(mask, label_im, list(range(nb_labels + 1))) - ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1)))))) - for _, label in ordered[2:]: - mask[label_im == label] = 0 - artmsk[label_im == label] = 1 - - mask = sim.binary_fill_holes(mask, struct).astype( - np.uint8 - ) # pylint: disable=no-member - - out_file = out_file or str(generate_filename(in_file, suffix="gradmask").absolute()) - nb.Nifti1Image(mask, imnii.affine, hdr).to_filename(out_file) - return out_file - - -def _get_imgtype(in_file): - from pathlib import Path - - return int(Path(in_file).name.rstrip(".gz").rstrip(".nii").split("_")[-1][1]) - - -def _get_mod(in_file): - from pathlib import Path - - return Path(in_file).name.rstrip(".gz").rstrip(".nii").split("_")[-1] - - -def _pop(inlist): - if isinstance(inlist, (list, tuple)): - return inlist[0] - return inlist diff --git a/pydra/tasks/mriqc/workflows/anatomical/output.py b/pydra/tasks/mriqc/workflows/anatomical/output.py deleted file mode 100644 index d85163d..0000000 --- a/pydra/tasks/mriqc/workflows/anatomical/output.py +++ /dev/null @@ -1,279 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; 
indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2023 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -"""Writing out anatomical reportlets.""" -from mriqc import config -from mriqc.interfaces import DerivativesDataSink -from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu - - -def init_anat_report_wf(name: str = "anat_report_wf"): - """ - Generate the components of the individual report. - - .. 
workflow:: - - from mriqc.workflows.anatomical.output import init_anat_report_wf - from mriqc.testing import mock_config - with mock_config(): - wf = init_anat_report_wf() - - """ - from nireports.interfaces import PlotMosaic - - # from mriqc.interfaces.reports import IndividualReport - - verbose = config.execution.verbose_reports - reportlets_dir = config.execution.work_dir / "reportlets" - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_ras", - "brainmask", - "headmask", - "airmask", - "artmask", - "rotmask", - "segmentation", - "inu_corrected", - "noisefit", - "in_iqms", - "mni_report", - "api_id", - "name_source", - ] - ), - name="inputnode", - ) - - mosaic_zoom = pe.Node( - PlotMosaic(cmap="Greys_r"), - name="PlotMosaicZoomed", - ) - - mosaic_noise = pe.Node( - PlotMosaic(only_noise=True, cmap="viridis_r"), - name="PlotMosaicNoise", - ) - if config.workflow.species.lower() in ("rat", "mouse"): - mosaic_zoom.inputs.view = ["coronal", "axial"] - mosaic_noise.inputs.view = ["coronal", "axial"] - - ds_report_zoomed = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="zoomed", - datatype="figures", - ), - name="ds_report_zoomed", - run_without_submitting=True, - ) - - ds_report_background = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="background", - datatype="figures", - ), - name="ds_report_background", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - # (inputnode, rnode, [("in_iqms", "in_iqms")]), - (inputnode, mosaic_zoom, [("in_ras", "in_file"), - ("brainmask", "bbox_mask_file")]), - (inputnode, mosaic_noise, [("in_ras", "in_file")]), - (inputnode, ds_report_zoomed, [("name_source", "source_file")]), - (inputnode, ds_report_background, [("name_source", "source_file")]), - (mosaic_zoom, ds_report_zoomed, [("out_file", "in_file")]), - (mosaic_noise, ds_report_background, [("out_file", "in_file")]), - ]) - # fmt: on - - if not verbose: - 
return workflow - - from nireports.interfaces import PlotContours - - display_mode = "y" if config.workflow.species.lower() in ("rat", "mouse") else "z" - plot_segm = pe.Node( - PlotContours( - display_mode=display_mode, - levels=[0.5, 1.5, 2.5], - cut_coords=10, - colors=["r", "g", "b"], - ), - name="PlotSegmentation", - ) - - ds_report_segm = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="segmentation", - datatype="figures", - ), - name="ds_report_segm", - run_without_submitting=True, - ) - - plot_bmask = pe.Node( - PlotContours( - display_mode=display_mode, - levels=[0.5], - colors=["r"], - cut_coords=10, - out_file="bmask", - ), - name="PlotBrainmask", - ) - - ds_report_bmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="brainmask", - datatype="figures", - ), - name="ds_report_bmask", - run_without_submitting=True, - ) - - plot_artmask = pe.Node( - PlotContours( - display_mode=display_mode, - levels=[0.5], - colors=["r"], - cut_coords=10, - out_file="artmask", - saturate=True, - ), - name="PlotArtmask", - ) - - ds_report_artmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="artifacts", - datatype="figures", - ), - name="ds_report_artmask", - run_without_submitting=True, - ) - - # NOTE: humans switch on these two to coronal view. 
- display_mode = "y" if config.workflow.species.lower() in ("rat", "mouse") else "x" - plot_airmask = pe.Node( - PlotContours( - display_mode=display_mode, - levels=[0.5], - colors=["r"], - cut_coords=6, - out_file="airmask", - ), - name="PlotAirmask", - ) - - ds_report_airmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="airmask", - datatype="figures", - ), - name="ds_report_airmask", - run_without_submitting=True, - ) - - plot_headmask = pe.Node( - PlotContours( - display_mode=display_mode, - levels=[0.5], - colors=["r"], - cut_coords=6, - out_file="headmask", - ), - name="PlotHeadmask", - ) - - ds_report_headmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="head", - datatype="figures", - ), - name="ds_report_headmask", - run_without_submitting=True, - ) - - ds_report_norm = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="norm", - datatype="figures", - ), - name="ds_report_norm", - run_without_submitting=True, - ) - - ds_report_noisefit = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="noisefit", - datatype="figures", - ), - name="ds_report_noisefit", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, ds_report_segm, [("name_source", "source_file")]), - (inputnode, ds_report_bmask, [("name_source", "source_file")]), - (inputnode, ds_report_artmask, [("name_source", "source_file")]), - (inputnode, ds_report_airmask, [("name_source", "source_file")]), - (inputnode, ds_report_headmask, [("name_source", "source_file")]), - (inputnode, ds_report_norm, [("mni_report", "in_file"), - ("name_source", "source_file")]), - (inputnode, ds_report_noisefit, [("noisefit", "in_file"), - ("name_source", "source_file")]), - (inputnode, plot_segm, [("in_ras", "in_file"), - ("segmentation", "in_contours")]), - (inputnode, plot_bmask, [("in_ras", "in_file"), - ("brainmask", "in_contours")]), - (inputnode, plot_headmask, [("in_ras", "in_file"), 
- ("headmask", "in_contours")]), - (inputnode, plot_airmask, [("in_ras", "in_file"), - ("airmask", "in_contours")]), - (inputnode, plot_artmask, [("in_ras", "in_file"), - ("artmask", "in_contours")]), - (plot_bmask, ds_report_bmask, [("out_file", "in_file")]), - (plot_segm, ds_report_segm, [("out_file", "in_file")]), - (plot_artmask, ds_report_artmask, [("out_file", "in_file")]), - (plot_headmask, ds_report_headmask, [("out_file", "in_file")]), - (plot_airmask, ds_report_airmask, [("out_file", "in_file")]), - ]) - # fmt: on - - return workflow diff --git a/pydra/tasks/mriqc/workflows/core.py b/pydra/tasks/mriqc/workflows/core.py deleted file mode 100644 index 5aa5308..0000000 --- a/pydra/tasks/mriqc/workflows/core.py +++ /dev/null @@ -1,59 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" -Combines the structural and functional MRI workflows. 
-""" -from nipype.pipeline.engine import Workflow -from mriqc.workflows.anatomical.base import anat_qc_workflow -from mriqc.workflows.functional.base import fmri_qc_workflow -from mriqc.workflows.diffusion.base import dmri_qc_workflow - -ANATOMICAL_KEYS = "t1w", "t2w" -FMRI_KEY = "bold" -DMRI_KEY = "dwi" - - -def init_mriqc_wf(): - """Create a multi-subject MRIQC workflow.""" - from mriqc import config - - # Create parent workflow - workflow = Workflow(name="mriqc_wf") - workflow.base_dir = config.execution.work_dir - - # Create fMRI QC workflow - if FMRI_KEY in config.workflow.inputs: - workflow.add_nodes([fmri_qc_workflow()]) - - # Create dMRI QC workflow - if DMRI_KEY in config.workflow.inputs: - workflow.add_nodes([dmri_qc_workflow()]) - - # Create sMRI QC workflow - input_keys = config.workflow.inputs.keys() - if any(key in input_keys for key in ANATOMICAL_KEYS): - workflow.add_nodes([anat_qc_workflow()]) - - # Return non-empty workflow, else None - if workflow._get_all_nodes(): - return workflow diff --git a/pydra/tasks/mriqc/workflows/diffusion/__init__.py b/pydra/tasks/mriqc/workflows/diffusion/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/pydra/tasks/mriqc/workflows/diffusion/base.py b/pydra/tasks/mriqc/workflows/diffusion/base.py deleted file mode 100644 index 76573f5..0000000 --- a/pydra/tasks/mriqc/workflows/diffusion/base.py +++ /dev/null @@ -1,654 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2023 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" -Diffusion MRI workflow -====================== - -.. image :: _static/diffusion_workflow_source.svg - -The diffusion workflow follows the following steps: - -#. Sanitize (revise data types and xforms) input data, read - associated metadata and discard non-steady state frames. -#. :abbr:`HMC (head-motion correction)` based on ``3dvolreg`` from - AFNI -- :py:func:`hmc`. -#. Skull-stripping of the time-series (AFNI) -- - :py:func:`dmri_bmsk_workflow`. -#. Calculate mean time-series, and :abbr:`tSNR (temporal SNR)`. -#. Spatial Normalization to MNI (ANTs) -- :py:func:`epi_mni_align` -#. Extraction of IQMs -- :py:func:`compute_iqms`. -#. Individual-reports generation -- - :py:func:`~mriqc.workflows.diffusion.output.init_dwi_report_wf`. - -This workflow is orchestrated by :py:func:`dmri_qc_workflow`. -""" -from mriqc import config -from nipype.interfaces import utility as niu -from nipype.pipeline import engine as pe -from mriqc.interfaces.datalad import DataladIdentityInterface -from mriqc.workflows.diffusion.output import init_dwi_report_wf - -DEFAULT_MEMORY_MIN_GB = 0.01 - - -def dmri_qc_workflow(name="dwiMRIQC"): - """ - Initialize the dMRI-QC workflow. - - .. 
workflow:: - - import os.path as op - from mriqc.workflows.diffusion.base import dmri_qc_workflow - from mriqc.testing import mock_config - with mock_config(): - wf = dmri_qc_workflow() - - """ - from nipype.interfaces.afni import Volreg - from nipype.interfaces.mrtrix3.preprocess import DWIDenoise - from niworkflows.interfaces.header import SanitizeImage - from mriqc.interfaces.diffusion import ( - CorrectSignalDrift, - DipyDTI, - ExtractB0, - FilterShells, - NumberOfShells, - ReadDWIMetadata, - WeightedStat, - ) - from mriqc.workflows.shared import synthstrip_wf as dmri_bmsk_workflow - from mriqc.messages import BUILDING_WORKFLOW - - workflow = pe.Workflow(name=name) - - mem_gb = config.workflow.biggest_file_gb - - dataset = config.workflow.inputs.get("dwi", []) - - message = BUILDING_WORKFLOW.format( - modality="diffusion", - detail=( - f"for {len(dataset)} NIfTI files." - if len(dataset) > 2 - else f"({' and '.join(('<%s>' % v for v in dataset))})." - ), - ) - config.loggers.workflow.info(message) - - # Define workflow, inputs and outputs - # 0. Get data, put it in RAS orientation - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") - inputnode.iterables = [("in_file", dataset)] - - datalad_get = pe.Node( - DataladIdentityInterface(fields=["in_file"], dataset_path=config.execution.bids_dir), - name="datalad_get", - ) - - outputnode = pe.Node( - niu.IdentityInterface(fields=["qc", "mosaic", "out_group", "out_dvars", "out_fd"]), - name="outputnode", - ) - - sanitize = pe.Node( - SanitizeImage( - n_volumes_to_discard=0, - max_32bit=config.execution.float32, - ), - name="sanitize", - mem_gb=mem_gb * 4.0, - ) - - # Workflow -------------------------------------------------------- - - # 1. 
Read metadata & bvec/bval, estimate number of shells, extract and split B0s - meta = pe.Node(ReadDWIMetadata(index_db=config.execution.bids_database_dir), name="metadata") - shells = pe.Node(NumberOfShells(), name="shells") - get_shells = pe.MapNode(ExtractB0(), name="get_shells", iterfield=["b0_ixs"]) - hmc_shells = pe.MapNode( - Volreg(args="-Fourier -twopass", zpad=4, outputtype="NIFTI_GZ"), - name="hmc_shells", - mem_gb=mem_gb * 2.5, - iterfield=["in_file"], - ) - - hmc_b0 = pe.Node( - Volreg(args="-Fourier -twopass", zpad=4, outputtype="NIFTI_GZ"), - name="hmc_b0", - mem_gb=mem_gb * 2.5, - ) - - drift = pe.Node(CorrectSignalDrift(), name="drift") - - # 2. Generate B0 reference - dwi_reference_wf = init_dmriref_wf(name="dwi_reference_wf") - - # 3. Calculate brainmask - dmri_bmsk = dmri_bmsk_workflow(omp_nthreads=config.nipype.omp_nthreads) - - # 4. HMC: head motion correct - hmcwf = hmc_workflow() - - # 5. Split shells and compute some stats - averages = pe.MapNode( - WeightedStat(), - name="averages", - mem_gb=mem_gb * 1.5, - iterfield=["in_weights"], - ) - stddev = pe.MapNode( - WeightedStat(stat="std"), - name="stddev", - mem_gb=mem_gb * 1.5, - iterfield=["in_weights"], - ) - - # 6. Fit DTI model - dti_filter = pe.Node(FilterShells(), name="dti_filter") - dwidenoise = pe.Node( - DWIDenoise( - noise="noisemap.nii.gz", - nthreads=config.nipype.omp_nthreads, - ), - name="dwidenoise", - nprocs=config.nipype.omp_nthreads, - ) - dti = pe.Node( - DipyDTI(free_water_model=False), - name="dti", - ) - - # 7. EPI to MNI registration - ema = epi_mni_align() - - # 8. Compute IQMs - iqmswf = compute_iqms() - - # 9. 
Generate outputs - dwi_report_wf = init_dwi_report_wf() - - # fmt: off - workflow.connect([ - (inputnode, datalad_get, [("in_file", "in_file")]), - (inputnode, meta, [("in_file", "in_file")]), - (inputnode, dwi_report_wf, [ - ("in_file", "inputnode.name_source"), - ]), - (datalad_get, iqmswf, [("in_file", "inputnode.in_file")]), - (datalad_get, sanitize, [("in_file", "in_file")]), - (sanitize, dwi_reference_wf, [("out_file", "inputnode.in_file")]), - (shells, dwi_reference_wf, [(("b_masks", _first), "inputnode.t_mask")]), - (meta, shells, [("out_bval_file", "in_bvals")]), - (sanitize, drift, [("out_file", "full_epi")]), - (shells, get_shells, [("b_indices", "b0_ixs")]), - (sanitize, get_shells, [("out_file", "in_file")]), - (meta, drift, [("out_bval_file", "bval_file")]), - (get_shells, hmc_shells, [(("out_file", _all_but_first), "in_file")]), - (get_shells, hmc_b0, [(("out_file", _first), "in_file")]), - (dwi_reference_wf, hmc_b0, [("outputnode.ref_file", "basefile")]), - (hmc_b0, drift, [("out_file", "in_file")]), - (shells, drift, [(("b_indices", _first), "b0_ixs")]), - (dwi_reference_wf, dmri_bmsk, [("outputnode.ref_file", "inputnode.in_files")]), - (dwi_reference_wf, ema, [("outputnode.ref_file", "inputnode.epi_mean")]), - (dmri_bmsk, drift, [("outputnode.out_mask", "brainmask_file")]), - (dmri_bmsk, ema, [("outputnode.out_mask", "inputnode.epi_mask")]), - (drift, hmcwf, [("out_full_file", "inputnode.reference")]), - (drift, averages, [("out_full_file", "in_file")]), - (drift, stddev, [("out_full_file", "in_file")]), - (shells, averages, [("b_masks", "in_weights")]), - (shells, stddev, [("b_masks", "in_weights")]), - (shells, dti_filter, [("out_data", "bvals")]), - (meta, dti_filter, [("out_bvec_file", "bvec_file")]), - (drift, dti_filter, [("out_full_file", "in_file")]), - (dti_filter, dti, [("out_bvals", "bvals")]), - (dti_filter, dti, [("out_bvec_file", "bvec_file")]), - (dti_filter, dwidenoise, [("out_file", "in_file")]), - (dmri_bmsk, dwidenoise, 
[("outputnode.out_mask", "mask")]), - (dwidenoise, dti, [("out_file", "in_file")]), - (dmri_bmsk, dti, [("outputnode.out_mask", "brainmask")]), - (hmcwf, outputnode, [("outputnode.out_fd", "out_fd")]), - (shells, iqmswf, [("n_shells", "inputnode.n_shells"), - ("b_values", "inputnode.b_values")]), - (dwidenoise, dwi_report_wf, [("noise", "inputnode.in_noise")]), - (shells, dwi_report_wf, [("b_dict", "inputnode.in_bdict")]), - (dmri_bmsk, dwi_report_wf, [("outputnode.out_mask", "inputnode.brainmask")]), - (shells, dwi_report_wf, [("b_values", "inputnode.in_shells")]), - (averages, dwi_report_wf, [("out_file", "inputnode.in_avgmap")]), - (stddev, dwi_report_wf, [("out_file", "inputnode.in_stdmap")]), - (drift, dwi_report_wf, [("out_full_file", "inputnode.in_epi")]), - (dti, dwi_report_wf, [("out_fa", "inputnode.in_fa"), - ("out_md", "inputnode.in_md")]), - (ema, dwi_report_wf, [("outputnode.epi_parc", "inputnode.in_parcellation")]), - ]) - # fmt: on - return workflow - - -def compute_iqms(name="ComputeIQMs"): - """ - Initialize the workflow that actually computes the IQMs. - - .. 
workflow:: - - from mriqc.workflows.diffusion.base import compute_iqms - from mriqc.testing import mock_config - with mock_config(): - wf = compute_iqms() - - """ - from niworkflows.interfaces.bids import ReadSidecarJSON - - from mriqc.interfaces.reports import AddProvenance - from mriqc.interfaces import IQMFileSink - - # from mriqc.workflows.utils import _tofloat, get_fwhmx - # mem_gb = config.workflow.biggest_file_gb - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_file", - "n_shells", - "b_values", - ] - ), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "out_file", - "meta_sidecar", - ] - ), - name="outputnode", - ) - - meta = pe.Node(ReadSidecarJSON(index_db=config.execution.bids_database_dir), name="metadata") - - addprov = pe.Node( - AddProvenance(modality="dwi"), - name="provenance", - run_without_submitting=True, - ) - - # Save to JSON file - datasink = pe.Node( - IQMFileSink( - modality="dwi", - out_dir=str(config.execution.output_dir), - dataset=config.execution.dsname, - ), - name="datasink", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, datasink, [("in_file", "in_file"), - ("n_shells", "NumberOfShells"), - ("b_values", "b-values")]), - (inputnode, meta, [("in_file", "in_file")]), - (inputnode, addprov, [("in_file", "in_file")]), - (addprov, datasink, [("out_prov", "provenance")]), - (meta, datasink, [("subject", "subject_id"), - ("session", "session_id"), - ("task", "task_id"), - ("acquisition", "acq_id"), - ("reconstruction", "rec_id"), - ("run", "run_id"), - ("out_dict", "metadata")]), - (datasink, outputnode, [("out_file", "out_file")]), - (meta, outputnode, [("out_dict", "meta_sidecar")]), - ]) - # fmt: on - - # Set FD threshold - # inputnode.inputs.fd_thres = config.workflow.fd_thres - - # # AFNI quality measures - # fwhm_interface = get_fwhmx() - # fwhm = pe.Node(fwhm_interface, name="smoothness") - # # 
fwhm.inputs.acf = True # add when AFNI >= 16 - # measures = pe.Node(FunctionalQC(), name="measures", mem_gb=mem_gb * 3) - - # # fmt: off - # workflow.connect([ - # (inputnode, measures, [("epi_mean", "in_epi"), - # ("brainmask", "in_mask"), - # ("hmc_epi", "in_hmc"), - # ("hmc_fd", "in_fd"), - # ("fd_thres", "fd_thres"), - # ("in_tsnr", "in_tsnr")]), - # (inputnode, fwhm, [("epi_mean", "in_file"), - # ("brainmask", "mask")]), - # (fwhm, measures, [(("fwhm", _tofloat), "in_fwhm")]), - # (measures, datasink, [("out_qc", "root")]), - # ]) - # # fmt: on - return workflow - - -def init_dmriref_wf( - in_file=None, - name="init_dmriref_wf", -): - """ - Build a workflow that generates reference images for a dMRI series. - - The raw reference image is the target of :abbr:`HMC (head motion correction)`, and a - contrast-enhanced reference is the subject of distortion correction, as well as - boundary-based registration to T1w and template spaces. - - This workflow assumes only one dMRI file has been passed. - - Workflow Graph - .. 
workflow:: - :graph2use: orig - :simple_form: yes - - from mriqc.workflows.diffusion.base import init_dmriref_wf - wf = init_dmriref_wf() - - Parameters - ---------- - in_file : :obj:`str` - dMRI series NIfTI file - ------ - in_file : str - series NIfTI file - - Outputs - ------- - in_file : str - Validated DWI series NIfTI file - ref_file : str - Reference image to which DWI series is motion corrected - """ - from niworkflows.interfaces.images import RobustAverage - from niworkflows.interfaces.header import ValidateImage - - workflow = pe.Workflow(name=name) - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file", "t_mask"]), name="inputnode") - outputnode = pe.Node( - niu.IdentityInterface(fields=["in_file", "ref_file", "validation_report"]), - name="outputnode", - ) - - # Simplify manually setting input image - if in_file is not None: - inputnode.inputs.in_file = in_file - - val_bold = pe.Node( - ValidateImage(), - name="val_bold", - mem_gb=DEFAULT_MEMORY_MIN_GB, - ) - - gen_avg = pe.Node(RobustAverage(mc_method=None), name="gen_avg", mem_gb=1) - # fmt: off - workflow.connect([ - (inputnode, val_bold, [("in_file", "in_file")]), - (inputnode, gen_avg, [("t_mask", "t_mask")]), - (val_bold, gen_avg, [("out_file", "in_file")]), - (gen_avg, outputnode, [("out_file", "ref_file")]), - ]) - # fmt: on - - return workflow - - -def hmc_workflow(name="dMRI_HMC"): - """ - Create a :abbr:`HMC (head motion correction)` workflow for dMRI. - - .. 
workflow:: - - from mriqc.workflows.diffusion.base import hmc - from mriqc.testing import mock_config - with mock_config(): - wf = hmc() - - """ - from nipype.algorithms.confounds import FramewiseDisplacement - from nipype.interfaces.afni import Volreg - - mem_gb = config.workflow.biggest_file_gb - - workflow = pe.Workflow(name=name) - - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file", "reference"]), name="inputnode") - outputnode = pe.Node(niu.IdentityInterface(fields=["out_file", "out_fd"]), name="outputnode") - - # calculate hmc parameters - hmc = pe.Node( - Volreg(args="-Fourier -twopass", zpad=4, outputtype="NIFTI_GZ"), - name="motion_correct", - mem_gb=mem_gb * 2.5, - ) - - # Compute the frame-wise displacement - fdnode = pe.Node( - FramewiseDisplacement( - normalize=False, - parameter_source="AFNI", - radius=config.workflow.fd_radius, - ), - name="ComputeFD", - ) - - # fmt: off - workflow.connect([ - (inputnode, hmc, [("in_file", "in_file"), - ("reference", "basefile")]), - (hmc, outputnode, [("out_file", "out_file")]), - (hmc, fdnode, [("oned_file", "in_file")]), - (fdnode, outputnode, [("out_file", "out_fd")]), - ]) - # fmt: on - return workflow - - -def epi_mni_align(name="SpatialNormalization"): - """ - Estimate the transform that maps the EPI space into MNI152NLin2009cAsym. - - The input epi_mean is the averaged and brain-masked EPI timeseries - - Returns the EPI mean resampled in MNI space (for checking out registration) and - the associated "lobe" parcellation in EPI space. - - .. 
workflow:: - - from mriqc.workflows.diffusion.base import epi_mni_align - from mriqc.testing import mock_config - with mock_config(): - wf = epi_mni_align() - - """ - from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection - from niworkflows.interfaces.reportlets.registration import ( - SpatialNormalizationRPT as RobustMNINormalization, - ) - from templateflow.api import get as get_template - - # Get settings - testing = config.execution.debug - n_procs = config.nipype.nprocs - ants_nthreads = config.nipype.omp_nthreads - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["epi_mean", "epi_mask"]), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["epi_mni", "epi_parc", "report"]), - name="outputnode", - ) - - n4itk = pe.Node(N4BiasFieldCorrection(dimension=3, copy_header=True), name="SharpenEPI") - - norm = pe.Node( - RobustMNINormalization( - explicit_masking=False, - flavor="testing" if testing else "precise", - float=config.execution.ants_float, - generate_report=True, - moving="boldref", - num_threads=ants_nthreads, - reference="boldref", - template=config.workflow.template_id, - ), - name="EPI2MNI", - num_threads=n_procs, - mem_gb=3, - ) - - if config.workflow.species.lower() == "human": - norm.inputs.reference_image = str( - get_template(config.workflow.template_id, resolution=2, suffix="boldref") - ) - norm.inputs.reference_mask = str( - get_template( - config.workflow.template_id, - resolution=2, - desc="brain", - suffix="mask", - ) - ) - # adapt some population-specific settings - else: - from nirodents.workflows.brainextraction import _bspline_grid - - n4itk.inputs.shrink_factor = 1 - n4itk.inputs.n_iterations = [50] * 4 - norm.inputs.reference_image = str(get_template(config.workflow.template_id, suffix="T2w")) - norm.inputs.reference_mask = str( - get_template( - config.workflow.template_id, - desc="brain", - suffix="mask", - )[0] - ) - - bspline_grid = 
pe.Node(niu.Function(function=_bspline_grid), name="bspline_grid") - - # fmt: off - workflow.connect([ - (inputnode, bspline_grid, [('epi_mean', 'in_file')]), - (bspline_grid, n4itk, [('out', 'args')]) - ]) - # fmt: on - - # Warp segmentation into EPI space - invt = pe.Node( - ApplyTransforms( - float=True, - dimension=3, - default_value=0, - interpolation="MultiLabel", - ), - name="ResampleSegmentation", - ) - - if config.workflow.species.lower() == "human": - invt.inputs.input_image = str( - get_template( - config.workflow.template_id, - resolution=1, - desc="carpet", - suffix="dseg", - ) - ) - else: - invt.inputs.input_image = str( - get_template( - config.workflow.template_id, - suffix="dseg", - )[-1] - ) - - # fmt: off - workflow.connect([ - (inputnode, invt, [("epi_mean", "reference_image")]), - (inputnode, n4itk, [("epi_mean", "input_image")]), - (n4itk, norm, [("output_image", "moving_image")]), - (norm, invt, [ - ("inverse_composite_transform", "transforms")]), - (invt, outputnode, [("output_image", "epi_parc")]), - (norm, outputnode, [("warped_image", "epi_mni"), - ("out_report", "report")]), - ]) - # fmt: on - - if config.workflow.species.lower() == "human": - workflow.connect([(inputnode, norm, [("epi_mask", "moving_mask")])]) - - return workflow - - -def _mean(inlist): - import numpy as np - - return np.mean(inlist) - - -def _parse_tqual(in_file): - import numpy as np - - with open(in_file, "r") as fin: - lines = fin.readlines() - return np.mean([float(line.strip()) for line in lines if not line.startswith("++")]) - - -def _parse_tout(in_file): - import numpy as np - - data = np.loadtxt(in_file) # pylint: disable=no-member - return data.mean() - - -def _tolist(value): - return [value] - - -def _get_bvals(bmatrix): - import numpy - - return numpy.squeeze(bmatrix[:, -1]).tolist() - - -def _first(inlist): - if isinstance(inlist, (list, tuple)): - return inlist[0] - - return inlist - - -def _all_but_first(inlist): - if isinstance(inlist, (list, tuple)): - 
return inlist[1:] - - return inlist diff --git a/pydra/tasks/mriqc/workflows/diffusion/output.py b/pydra/tasks/mriqc/workflows/diffusion/output.py deleted file mode 100644 index d7346be..0000000 --- a/pydra/tasks/mriqc/workflows/diffusion/output.py +++ /dev/null @@ -1,437 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2023 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -"""Writing out diffusion reportlets.""" -from mriqc import config -from mriqc.interfaces import DerivativesDataSink - -from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu -from nireports.interfaces.reporting.base import ( - SimpleBeforeAfterRPT as SimpleBeforeAfter, -) -from nireports.interfaces.dmri import DWIHeatmap - - -def init_dwi_report_wf(name="dwi_report_wf"): - """ - Write out individual reportlets. - - .. 
workflow:: - - from mriqc.workflows.diffusion.output import init_dwi_report_wf - from mriqc.testing import mock_config - with mock_config(): - wf = init_dwi_report_wf() - - """ - from nireports.interfaces import FMRISummary - from niworkflows.interfaces.morphology import BinaryDilation, BinarySubtraction - - from nireports.interfaces import PlotMosaic, PlotSpikes - - # from mriqc.interfaces.reports import IndividualReport - - verbose = config.execution.verbose_reports - mem_gb = config.workflow.biggest_file_gb - reportlets_dir = config.execution.work_dir / "reportlets" - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_epi", - "brainmask", - "in_avgmap", - "in_stdmap", - "in_shells", - "in_fa", - "in_md", - "in_parcellation", - "in_bdict", - "in_noise", - "name_source", - ] - ), - name="inputnode", - ) - - estimate_sigma = pe.Node( - niu.Function(function=_estimate_sigma), - name="estimate_sigma", - ) - - # Set FD threshold - # inputnode.inputs.fd_thres = config.workflow.fd_thres - - mosaic_fa = pe.Node( - PlotMosaic(cmap="Greys_r"), - name="mosaic_fa", - ) - mosaic_md = pe.Node( - PlotMosaic(cmap="Greys_r"), - name="mosaic_md", - ) - - mosaic_snr = pe.MapNode( - SimpleBeforeAfter( - fixed_params={"cmap": "viridis"}, - moving_params={"cmap": "Greys_r"}, - before_label="Average", - after_label="Standard Deviation", - dismiss_affine=True, - ), - name="mosaic_snr", - iterfield=["before", "after"], - ) - - mosaic_noise = pe.MapNode( - PlotMosaic( - only_noise=True, - cmap="viridis_r", - ), - name="mosaic_noise", - iterfield=["in_file"], - ) - - if config.workflow.species.lower() in ("rat", "mouse"): - mosaic_noise.inputs.view = ["coronal", "axial"] - mosaic_fa.inputs.view = ["coronal", "axial"] - mosaic_md.inputs.view = ["coronal", "axial"] - - ds_report_snr = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="avgstd", - datatype="figures", - allowed_entities=("bval",), - ), - 
name="ds_report_snr", - run_without_submitting=True, - iterfield=["in_file", "bval"], - ) - - ds_report_noise = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="background", - datatype="figures", - allowed_entities=("bval",), - ), - name="ds_report_noise", - run_without_submitting=True, - iterfield=["in_file", "bval"], - ) - - ds_report_fa = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="fa", - datatype="figures", - ), - name="ds_report_fa", - run_without_submitting=True, - ) - - ds_report_md = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="ad", - datatype="figures", - ), - name="ds_report_md", - run_without_submitting=True, - ) - - def _gen_entity(inlist): - return ["00000"] + [f"{int(round(bval, 0)):05d}" for bval in inlist] - - # fmt: off - workflow.connect([ - (inputnode, mosaic_snr, [("in_avgmap", "before"), - ("in_stdmap", "after"), - ("brainmask", "wm_seg")]), - (inputnode, mosaic_noise, [("in_avgmap", "in_file")]), - (inputnode, mosaic_fa, [("in_fa", "in_file"), - ("brainmask", "bbox_mask_file")]), - (inputnode, mosaic_md, [("in_md", "in_file"), - ("brainmask", "bbox_mask_file")]), - (inputnode, ds_report_snr, [("name_source", "source_file"), - (("in_shells", _gen_entity), "bval")]), - (inputnode, ds_report_noise, [("name_source", "source_file"), - (("in_shells", _gen_entity), "bval")]), - (inputnode, ds_report_fa, [("name_source", "source_file")]), - (inputnode, ds_report_md, [("name_source", "source_file")]), - (mosaic_snr, ds_report_snr, [("out_report", "in_file")]), - (mosaic_noise, ds_report_noise, [("out_file", "in_file")]), - (mosaic_fa, ds_report_fa, [("out_file", "in_file")]), - (mosaic_md, ds_report_md, [("out_file", "in_file")]), - ]) - # fmt: on - - get_wm = pe.Node(niu.Function(function=_get_wm), name="get_wm") - plot_heatmap = pe.Node( - DWIHeatmap(scalarmap_label="Shell-wise Fractional Anisotropy (FA)"), - name="plot_heatmap", - ) - ds_report_hm = pe.Node( - 
DerivativesDataSink( - base_directory=reportlets_dir, - desc="heatmap", - datatype="figures", - ), - name="ds_report_hm", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, get_wm, [("in_parcellation", "in_file")]), - (inputnode, plot_heatmap, [("in_epi", "in_file"), - ("in_fa", "scalarmap"), - ("in_bdict", "b_indices")]), - (inputnode, ds_report_hm, [("name_source", "source_file")]), - (inputnode, estimate_sigma, [("in_noise", "in_file"), - ("brainmask", "mask")]), - (estimate_sigma, plot_heatmap, [("out", "sigma")]), - (get_wm, plot_heatmap, [("out", "mask_file")]), - (plot_heatmap, ds_report_hm, [("out_file", "in_file")]), - - ]) - # fmt: on - - if True: - return workflow - - # Generate crown mask - # Create the crown mask - dilated_mask = pe.Node(BinaryDilation(), name="dilated_mask") - subtract_mask = pe.Node(BinarySubtraction(), name="subtract_mask") - parcels = pe.Node(niu.Function(function=_carpet_parcellation), name="parcels") - - bigplot = pe.Node(FMRISummary(), name="BigPlot", mem_gb=mem_gb * 3.5) - - ds_report_carpet = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="carpet", - datatype="figures", - ), - name="ds_report_carpet", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - # (inputnode, rnode, [("in_iqms", "in_iqms")]), - (inputnode, bigplot, [("hmc_epi", "in_func"), - ("hmc_fd", "fd"), - ("fd_thres", "fd_thres"), - ("in_dvars", "dvars"), - ("outliers", "outliers"), - (("meta_sidecar", _get_tr), "tr")]), - (inputnode, parcels, [("epi_parc", "segmentation")]), - (inputnode, dilated_mask, [("brainmask", "in_mask")]), - (inputnode, subtract_mask, [("brainmask", "in_subtract")]), - (dilated_mask, subtract_mask, [("out_mask", "in_base")]), - (subtract_mask, parcels, [("out_mask", "crown_mask")]), - (parcels, bigplot, [("out", "in_segm")]), - (inputnode, ds_report_carpet, [("name_source", "source_file")]), - (bigplot, ds_report_carpet, [("out_file", "in_file")]), - ]) - # 
fmt: on - - if config.workflow.fft_spikes_detector: - mosaic_spikes = pe.Node( - PlotSpikes( - out_file="plot_spikes.svg", - cmap="viridis", - title="High-Frequency spikes", - ), - name="PlotSpikes", - ) - - ds_report_spikes = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="spikes", - datatype="figures", - ), - name="ds_report_spikes", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, ds_report_spikes, [("name_source", "source_file")]), - (inputnode, mosaic_spikes, [("in_ras", "in_file"), - ("in_spikes", "in_spikes"), - ("in_fft", "in_fft")]), - (mosaic_spikes, ds_report_spikes, [("out_file", "in_file")]), - ]) - # fmt: on - - if not verbose: - return workflow - - # Verbose-reporting goes here - from nireports.interfaces import PlotContours - - mosaic_zoom = pe.Node( - PlotMosaic( - cmap="Greys_r", - ), - name="PlotMosaicZoomed", - ) - - plot_bmask = pe.Node( - PlotContours( - display_mode="y" if config.workflow.species.lower() in ("rat", "mouse") else "z", - levels=[0.5], - colors=["r"], - cut_coords=10, - out_file="bmask", - ), - name="PlotBrainmask", - ) - - ds_report_zoomed = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="zoomed", - datatype="figures", - ), - name="ds_report_zoomed", - run_without_submitting=True, - ) - - ds_report_background = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="background", - datatype="figures", - ), - name="ds_report_background", - run_without_submitting=True, - ) - - ds_report_bmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="brainmask", - datatype="figures", - ), - name="ds_report_bmask", - run_without_submitting=True, - ) - - ds_report_norm = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="norm", - datatype="figures", - ), - name="ds_report_norm", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, ds_report_norm, [("mni_report", 
"in_file"), - ("name_source", "source_file")]), - (inputnode, plot_bmask, [("epi_mean", "in_file"), - ("brainmask", "in_contours")]), - (inputnode, mosaic_zoom, [("epi_mean", "in_file"), - ("brainmask", "bbox_mask_file")]), - (inputnode, mosaic_noise, [("epi_mean", "in_file")]), - (inputnode, ds_report_zoomed, [("name_source", "source_file")]), - (inputnode, ds_report_background, [("name_source", "source_file")]), - (inputnode, ds_report_bmask, [("name_source", "source_file")]), - (mosaic_zoom, ds_report_zoomed, [("out_file", "in_file")]), - (mosaic_noise, ds_report_background, [("out_file", "in_file")]), - (plot_bmask, ds_report_bmask, [("out_file", "in_file")]), - ]) - # fmt: on - - return workflow - - -def _carpet_parcellation(segmentation, crown_mask): - """Generate the union of two masks.""" - from pathlib import Path - import numpy as np - import nibabel as nb - - img = nb.load(segmentation) - - lut = np.zeros((256,), dtype="uint8") - lut[100:201] = 1 # Ctx GM - lut[30:99] = 2 # dGM - lut[1:11] = 3 # WM+CSF - lut[255] = 4 # Cerebellum - # Apply lookup table - seg = lut[np.asanyarray(img.dataobj, dtype="uint16")] - seg[np.asanyarray(nb.load(crown_mask).dataobj, dtype=int) > 0] = 5 - - outimg = img.__class__(seg.astype("uint8"), img.affine, img.header) - outimg.set_data_dtype("uint8") - out_file = Path("segments.nii.gz").absolute() - outimg.to_filename(out_file) - return str(out_file) - - -def _get_tr(meta_dict): - return meta_dict.get("RepetitionTime", None) - - -def _get_wm(in_file, radius=2): - from pathlib import Path - import numpy as np - import nibabel as nb - from nipype.utils.filemanip import fname_presuffix - from scipy import ndimage as ndi - from skimage.morphology import ball - - parc = nb.load(in_file) - hdr = parc.header.copy() - data = np.array(parc.dataobj, dtype=hdr.get_data_dtype()) - wm_mask = ndi.binary_erosion((data == 1) | (data == 2), ball(radius)) - - hdr.set_data_dtype(np.uint8) - out_wm = fname_presuffix(in_file, suffix="wm", 
newpath=str(Path.cwd())) - parc.__class__( - wm_mask.astype(np.uint8), - parc.affine, - hdr, - ).to_filename(out_wm) - return out_wm - - -def _estimate_sigma(in_file, mask): - import numpy as np - import nibabel as nb - - msk = np.asanyarray(nb.load(mask).dataobj) > 0.5 - - return float( - np.median(nb.load(in_file).get_fdata()[msk]) - ) diff --git a/pydra/tasks/mriqc/workflows/functional/__init__.py b/pydra/tasks/mriqc/workflows/functional/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/pydra/tasks/mriqc/workflows/functional/base.py b/pydra/tasks/mriqc/workflows/functional/base.py deleted file mode 100644 index abe4ed1..0000000 --- a/pydra/tasks/mriqc/workflows/functional/base.py +++ /dev/null @@ -1,813 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -""" -Functional workflow -=================== - -.. image :: _static/functional_workflow_source.svg - -The functional workflow follows the following steps: - -#. Sanitize (revise data types and xforms) input data, read - associated metadata and discard non-steady state frames. -#. :abbr:`HMC (head-motion correction)` based on ``3dvolreg`` from - AFNI -- :py:func:`hmc`. -#. 
Skull-stripping of the time-series (AFNI) -- - :py:func:`fmri_bmsk_workflow`. -#. Calculate mean time-series, and :abbr:`tSNR (temporal SNR)`. -#. Spatial Normalization to MNI (ANTs) -- :py:func:`epi_mni_align` -#. Extraction of IQMs -- :py:func:`compute_iqms`. -#. Individual-reports generation -- - :py:func:`~mriqc.workflows.functional.output.init_func_report_wf`. - -This workflow is orchestrated by :py:func:`fmri_qc_workflow`. -""" -from mriqc import config -from nipype.interfaces import utility as niu -from nipype.pipeline import engine as pe -from niworkflows.utils.connections import pop_file as _pop - -from mriqc.interfaces.datalad import DataladIdentityInterface -from mriqc.workflows.functional.output import init_func_report_wf - - -def fmri_qc_workflow(name="funcMRIQC"): - """ - Initialize the (f)MRIQC workflow. - - .. workflow:: - - import os.path as op - from mriqc.workflows.functional.base import fmri_qc_workflow - from mriqc.testing import mock_config - with mock_config(): - wf = fmri_qc_workflow() - - """ - from nipype.algorithms.confounds import TSNR, NonSteadyStateDetector - from nipype.interfaces.afni import TStat - from niworkflows.interfaces.bids import ReadSidecarJSON - from niworkflows.interfaces.header import SanitizeImage - from mriqc.messages import BUILDING_WORKFLOW - from mriqc.interfaces.functional import SelectEcho - - workflow = pe.Workflow(name=name) - - mem_gb = config.workflow.biggest_file_gb - - dataset = config.workflow.inputs.get("bold", []) - - message = BUILDING_WORKFLOW.format( - modality="functional", - detail=( - f"for {len(dataset)} BOLD runs." - if len(dataset) > 2 - else f"({' and '.join(('<%s>' % v for v in dataset))})." - ), - ) - config.loggers.workflow.info(message) - - # Define workflow, inputs and outputs - # 0. 
Get data, put it in RAS orientation - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") - inputnode.iterables = [("in_file", dataset)] - - datalad_get = pe.MapNode( - DataladIdentityInterface(fields=["in_file"], dataset_path=config.execution.bids_dir), - name="datalad_get", - iterfield=["in_file"], - ) - - outputnode = pe.Node( - niu.IdentityInterface(fields=["qc", "mosaic", "out_group", "out_dvars", "out_fd"]), - name="outputnode", - ) - - # Get metadata - meta = pe.MapNode(ReadSidecarJSON( - index_db=config.execution.bids_database_dir - ), name="metadata", iterfield=["in_file"]) - - pick_echo = pe.Node(SelectEcho(), name="pick_echo") - - non_steady_state_detector = pe.Node(NonSteadyStateDetector(), name="non_steady_state_detector") - - sanitize = pe.MapNode( - SanitizeImage(max_32bit=config.execution.float32), - name="sanitize", - mem_gb=mem_gb * 4.0, - iterfield=["in_file"], - ) - - # Workflow -------------------------------------------------------- - - # 1. HMC: head motion correct - hmcwf = hmc(omp_nthreads=config.nipype.omp_nthreads) - - # Set HMC settings - hmcwf.inputs.inputnode.fd_radius = config.workflow.fd_radius - - # 2. Compute mean fmri - mean = pe.MapNode( - TStat(options="-mean", outputtype="NIFTI_GZ"), - name="mean", - mem_gb=mem_gb * 1.5, - iterfield=["in_file"], - ) - - # Compute TSNR using nipype implementation - tsnr = pe.MapNode( - TSNR(), - name="compute_tsnr", - mem_gb=mem_gb * 2.5, - iterfield=["in_file"], - ) - - # EPI to MNI registration - ema = epi_mni_align() - - # 7. 
Compute IQMs - iqmswf = compute_iqms() - # Reports - func_report_wf = init_func_report_wf() - - # fmt: off - - workflow.connect([ - (inputnode, datalad_get, [("in_file", "in_file")]), - (datalad_get, meta, [("in_file", "in_file")]), - (datalad_get, pick_echo, [("in_file", "in_files")]), - (datalad_get, sanitize, [("in_file", "in_file")]), - (meta, pick_echo, [("out_dict", "metadata")]), - (pick_echo, non_steady_state_detector, [("out_file", "in_file")]), - (non_steady_state_detector, sanitize, [("n_volumes_to_discard", "n_volumes_to_discard")]), - (sanitize, hmcwf, [("out_file", "inputnode.in_file")]), - (hmcwf, mean, [("outputnode.out_file", "in_file")]), - (hmcwf, tsnr, [("outputnode.out_file", "in_file")]), - (mean, ema, [(("out_file", _pop), "inputnode.epi_mean")]), - # Feed IQMs computation - (meta, iqmswf, [("out_dict", "inputnode.metadata"), - ("subject", "inputnode.subject"), - ("session", "inputnode.session"), - ("task", "inputnode.task"), - ("acquisition", "inputnode.acquisition"), - ("reconstruction", "inputnode.reconstruction"), - ("run", "inputnode.run")]), - (datalad_get, iqmswf, [("in_file", "inputnode.in_file")]), - (sanitize, iqmswf, [("out_file", "inputnode.in_ras")]), - (mean, iqmswf, [("out_file", "inputnode.epi_mean")]), - (hmcwf, iqmswf, [("outputnode.out_file", "inputnode.hmc_epi"), - ("outputnode.out_fd", "inputnode.hmc_fd"), - ("outputnode.mpars", "inputnode.mpars")]), - (tsnr, iqmswf, [("tsnr_file", "inputnode.in_tsnr")]), - (non_steady_state_detector, iqmswf, [("n_volumes_to_discard", "inputnode.exclude_index")]), - # Feed reportlet generation - (inputnode, func_report_wf, [ - ("in_file", "inputnode.name_source"), - ]), - (sanitize, func_report_wf, [("out_file", "inputnode.in_ras")]), - (mean, func_report_wf, [("out_file", "inputnode.epi_mean")]), - (tsnr, func_report_wf, [("stddev_file", "inputnode.in_stddev")]), - (hmcwf, func_report_wf, [ - ("outputnode.out_fd", "inputnode.hmc_fd"), - ("outputnode.out_file", "inputnode.hmc_epi"), - ]), 
- (ema, func_report_wf, [ - ("outputnode.epi_parc", "inputnode.epi_parc"), - ("outputnode.report", "inputnode.mni_report"), - ]), - (iqmswf, func_report_wf, [ - ("outputnode.out_file", "inputnode.in_iqms"), - ("outputnode.out_dvars", "inputnode.in_dvars"), - ("outputnode.outliers", "inputnode.outliers"), - ]), - (meta, func_report_wf, [("out_dict", "inputnode.meta_sidecar")]), - (hmcwf, outputnode, [("outputnode.out_fd", "out_fd")]), - ]) - # fmt: on - - if config.workflow.fft_spikes_detector: - # fmt: off - workflow.connect([ - (iqmswf, func_report_wf, [ - ("outputnode.out_spikes", "inputnode.in_spikes"), - ("outputnode.out_fft", "inputnode.in_fft"), - ]), - ]) - # fmt: on - - # population specific changes to brain masking - if config.workflow.species == "human": - from mriqc.workflows.shared import synthstrip_wf as fmri_bmsk_workflow - - skullstrip_epi = fmri_bmsk_workflow(omp_nthreads=config.nipype.omp_nthreads) - # fmt: off - workflow.connect([ - (mean, skullstrip_epi, [(("out_file", _pop), "inputnode.in_files")]), - (skullstrip_epi, ema, [("outputnode.out_mask", "inputnode.epi_mask")]), - (skullstrip_epi, iqmswf, [("outputnode.out_mask", "inputnode.brainmask")]), - (skullstrip_epi, func_report_wf, [("outputnode.out_mask", "inputnode.brainmask")]), - ]) - # fmt: on - else: - from mriqc.workflows.anatomical.base import _binarize - - binarise_labels = pe.Node( - niu.Function( - input_names=["in_file", "threshold"], - output_names=["out_file"], - function=_binarize, - ), - name="binarise_labels", - ) - - # fmt: off - workflow.connect([ - (ema, binarise_labels, [("outputnode.epi_parc", "in_file")]), - (binarise_labels, iqmswf, [("out_file", "inputnode.brainmask")]), - (binarise_labels, func_report_wf, [("out_file", "inputnode.brainmask")]) - ]) - # fmt: on - - # Upload metrics - if not config.execution.no_sub: - from mriqc.interfaces.webapi import UploadIQMs - - upldwf = pe.MapNode(UploadIQMs( - endpoint=config.execution.webapi_url, - 
auth_token=config.execution.webapi_token, - strict=config.execution.upload_strict, - ), name="UploadMetrics", iterfield=["in_iqms"]) - - # fmt: off - workflow.connect([ - (iqmswf, upldwf, [("outputnode.out_file", "in_iqms")]), - ]) - # fmt: on - - return workflow - - -def compute_iqms(name="ComputeIQMs"): - """ - Initialize the workflow that actually computes the IQMs. - - .. workflow:: - - from mriqc.workflows.functional.base import compute_iqms - from mriqc.testing import mock_config - with mock_config(): - wf = compute_iqms() - - """ - from nipype.algorithms.confounds import ComputeDVARS - from nipype.interfaces.afni import OutlierCount, QualityIndex - - from mriqc.interfaces import ( - DerivativesDataSink, - FunctionalQC, - IQMFileSink, - GatherTimeseries - ) - from mriqc.interfaces.reports import AddProvenance - from mriqc.interfaces.transitional import GCOR - from mriqc.workflows.utils import _tofloat, get_fwhmx - - mem_gb = config.workflow.biggest_file_gb - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_file", - "in_ras", - "epi_mean", - "brainmask", - "hmc_epi", - "hmc_fd", - "fd_thres", - "in_tsnr", - "metadata", - "mpars", - "exclude_index", - "subject", - "session", - "task", - "acquisition", - "reconstruction", - "run", - ] - ), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "out_file", - "out_dvars", - "outliers", - "out_spikes", - "out_fft", - ] - ), - name="outputnode", - ) - - # Set FD threshold - inputnode.inputs.fd_thres = config.workflow.fd_thres - - # Compute DVARS - dvnode = pe.MapNode( - ComputeDVARS(save_plot=False, save_all=True), - name="ComputeDVARS", - mem_gb=mem_gb * 3, - iterfield=["in_file"], - ) - - # AFNI quality measures - fwhm = pe.MapNode(get_fwhmx(), name="smoothness", iterfield=["in_file"]) - fwhm.inputs.acf = True # Only AFNI >= 16 - - outliers = pe.MapNode( - OutlierCount(fraction=True, out_file="outliers.out"), - name="outliers", - 
mem_gb=mem_gb * 2.5, - iterfield=["in_file"], - ) - - quality = pe.MapNode( - QualityIndex(automask=True), - out_file="quality.out", - name="quality", - mem_gb=mem_gb * 3, - iterfield=["in_file"], - ) - - gcor = pe.MapNode(GCOR(), name="gcor", mem_gb=mem_gb * 2, iterfield=["in_file"]) - - measures = pe.MapNode( - FunctionalQC(), - name="measures", - mem_gb=mem_gb * 3, - iterfield=["in_epi", "in_hmc", "in_tsnr", "in_dvars", "in_fwhm"], - ) - - timeseries = pe.MapNode( - GatherTimeseries(mpars_source="AFNI"), - name="timeseries", - mem_gb=mem_gb * 3, - iterfield=["dvars", "outliers", "quality", "fd"] - ) - - # fmt: off - workflow.connect([ - (inputnode, dvnode, [("hmc_epi", "in_file"), - ("brainmask", "in_mask")]), - (inputnode, measures, [("epi_mean", "in_epi"), - ("brainmask", "in_mask"), - ("hmc_epi", "in_hmc"), - ("hmc_fd", "in_fd"), - ("fd_thres", "fd_thres"), - ("in_tsnr", "in_tsnr")]), - (inputnode, fwhm, [("epi_mean", "in_file"), - ("brainmask", "mask")]), - (inputnode, quality, [("hmc_epi", "in_file")]), - (inputnode, outliers, [("hmc_epi", "in_file"), - ("brainmask", "mask")]), - (inputnode, gcor, [("hmc_epi", "in_file"), - ("brainmask", "mask")]), - (dvnode, measures, [("out_all", "in_dvars")]), - (fwhm, measures, [(("fwhm", _tofloat), "in_fwhm")]), - (dvnode, outputnode, [("out_all", "out_dvars")]), - (outliers, outputnode, [("out_file", "outliers")]), - (outliers, timeseries, [("out_file", "outliers")]), - (quality, timeseries, [("out_file", "quality")]), - (dvnode, timeseries, [("out_all", "dvars")]), - (inputnode, timeseries, [("hmc_fd", "fd"), ("mpars", "mpars")]), - ]) - # fmt: on - - addprov = pe.MapNode( - AddProvenance(modality="bold"), - name="provenance", - run_without_submitting=True, - iterfield=["in_file"], - ) - - # Save to JSON file - datasink = pe.MapNode( - IQMFileSink( - modality="bold", - out_dir=str(config.execution.output_dir), - dataset=config.execution.dsname, - ), - name="datasink", - run_without_submitting=True, - 
iterfield=["in_file", "root", "metadata", "provenance"], - ) - - # Save timeseries TSV file - ds_timeseries = pe.MapNode( - DerivativesDataSink( - base_directory=str(config.execution.output_dir), - suffix="timeseries" - ), - name="ds_timeseries", - run_without_submitting=True, - iterfield=["in_file", "source_file", "meta_dict"], - ) - - # fmt: off - workflow.connect([ - (inputnode, addprov, [("in_file", "in_file")]), - (inputnode, datasink, [("in_file", "in_file"), - ("exclude_index", "dummy_trs"), - (("subject", _pop), "subject_id"), - (("session", _pop), "session_id"), - (("task", _pop), "task_id"), - (("acquisition", _pop), "acq_id"), - (("reconstruction", _pop), "rec_id"), - (("run", _pop), "run_id"), - ("metadata", "metadata")]), - (addprov, datasink, [("out_prov", "provenance")]), - (outliers, datasink, [(("out_file", _parse_tout), "aor")]), - (gcor, datasink, [(("out", _tofloat), "gcor")]), - (quality, datasink, [(("out_file", _parse_tqual), "aqi")]), - (measures, datasink, [("out_qc", "root")]), - (datasink, outputnode, [("out_file", "out_file")]), - (inputnode, ds_timeseries, [("in_file", "source_file")]), - (timeseries, ds_timeseries, [("timeseries_file", "in_file"), - ("timeseries_metadata", "meta_dict")]), - ]) - # fmt: on - - # FFT spikes finder - if config.workflow.fft_spikes_detector: - from mriqc.workflows.utils import slice_wise_fft - - spikes_fft = pe.MapNode( - niu.Function( - input_names=["in_file"], - output_names=["n_spikes", "out_spikes", "out_fft"], - function=slice_wise_fft, - ), - name="SpikesFinderFFT", - iterfield=["in_file"], - ) - - # fmt: off - workflow.connect([ - (inputnode, spikes_fft, [("in_ras", "in_file")]), - (spikes_fft, outputnode, [("out_spikes", "out_spikes"), - ("out_fft", "out_fft")]), - (spikes_fft, datasink, [("n_spikes", "spikes_num")]) - ]) - # fmt: on - - return workflow - - -def fmri_bmsk_workflow(name="fMRIBrainMask"): - """ - Compute a brain mask for the input :abbr:`fMRI (functional MRI)` dataset. - - .. 
workflow:: - - from mriqc.workflows.functional.base import fmri_bmsk_workflow - from mriqc.testing import mock_config - with mock_config(): - wf = fmri_bmsk_workflow() - - - """ - from nipype.interfaces.afni import Automask - - workflow = pe.Workflow(name=name) - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") - outputnode = pe.Node(niu.IdentityInterface(fields=["out_file"]), name="outputnode") - afni_msk = pe.Node(Automask(outputtype="NIFTI_GZ"), name="afni_msk") - - # Connect brain mask extraction - # fmt: off - workflow.connect([ - (inputnode, afni_msk, [("in_file", "in_file")]), - (afni_msk, outputnode, [("out_file", "out_file")]) - ]) - # fmt: on - return workflow - - -def hmc(name="fMRI_HMC", omp_nthreads=None): - """ - Create a :abbr:`HMC (head motion correction)` workflow for fMRI. - - .. workflow:: - - from mriqc.workflows.functional.base import hmc - from mriqc.testing import mock_config - with mock_config(): - wf = hmc() - - """ - from nipype.algorithms.confounds import FramewiseDisplacement - from nipype.interfaces.afni import Despike, Refit, Volreg - - mem_gb = config.workflow.biggest_file_gb - - workflow = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_file", "fd_radius"]), - name="inputnode", - ) - - outputnode = pe.Node( - niu.IdentityInterface(fields=["out_file", "out_fd", "mpars"]), - name="outputnode", - ) - - # calculate hmc parameters - estimate_hm = pe.Node( - Volreg(args="-Fourier -twopass", zpad=4, outputtype="NIFTI_GZ"), - name="estimate_hm", - mem_gb=mem_gb * 2.5, - ) - - # Compute the frame-wise displacement - fdnode = pe.Node( - FramewiseDisplacement(normalize=False, parameter_source="AFNI"), - name="ComputeFD", - ) - - # Apply transforms to other echos - apply_hmc = pe.MapNode( - niu.Function(function=_apply_transforms, input_names=["in_file", "in_xfm"]), - name="apply_hmc", - iterfield=["in_file"], - # NiTransforms is a memory hog, so ensure only one process is 
running at a time - num_threads=config.environment.cpu_count, - ) - - # fmt: off - workflow.connect([ - (inputnode, fdnode, [("fd_radius", "radius")]), - (estimate_hm, apply_hmc, [("oned_matrix_save", "in_xfm")]), - (apply_hmc, outputnode, [("out", "out_file")]), - (estimate_hm, fdnode, [("oned_file", "in_file")]), - (estimate_hm, outputnode, [("oned_file", "mpars")]), - (fdnode, outputnode, [("out_file", "out_fd")]), - ]) - # fmt: on - - if not (config.workflow.despike or config.workflow.deoblique): - # fmt: off - workflow.connect([ - (inputnode, estimate_hm, [(("in_file", _pop), "in_file")]), - (inputnode, apply_hmc, [("in_file", "in_file")]), - ]) - # fmt: on - return workflow - - # despiking, and deoblique - deoblique_node = pe.MapNode( - Refit(deoblique=True), - name="deoblique", - iterfield=["in_file"], - ) - despike_node = pe.MapNode( - Despike(outputtype="NIFTI_GZ"), - name="despike", - iterfield=["in_file"], - ) - if config.workflow.despike and config.workflow.deoblique: - # fmt: off - workflow.connect([ - (inputnode, despike_node, [("in_file", "in_file")]), - (despike_node, deoblique_node, [("out_file", "in_file")]), - (deoblique_node, estimate_hm, [(("out_file", _pop), "in_file")]), - (deoblique_node, apply_hmc, [("out_file", "in_file")]), - ]) - # fmt: on - elif config.workflow.despike: - # fmt: off - workflow.connect([ - (inputnode, despike_node, [("in_file", "in_file")]), - (despike_node, estimate_hm, [(("out_file", _pop), "in_file")]), - (despike_node, apply_hmc, [("out_file", "in_file")]), - ]) - # fmt: on - elif config.workflow.deoblique: - # fmt: off - workflow.connect([ - (inputnode, deoblique_node, [("in_file", "in_file")]), - (deoblique_node, estimate_hm, [(("out_file", _pop), "in_file")]), - (deoblique_node, apply_hmc, [("out_file", "in_file")]), - ]) - # fmt: on - else: - raise NotImplementedError - - return workflow - - -def epi_mni_align(name="SpatialNormalization"): - """ - Estimate the transform that maps the EPI space into 
MNI152NLin2009cAsym. - - The input epi_mean is the averaged and brain-masked EPI timeseries - - Returns the EPI mean resampled in MNI space (for checking out registration) and - the associated "lobe" parcellation in EPI space. - - .. workflow:: - - from mriqc.workflows.functional.base import epi_mni_align - from mriqc.testing import mock_config - with mock_config(): - wf = epi_mni_align() - - """ - from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection - from niworkflows.interfaces.reportlets.registration import ( - SpatialNormalizationRPT as RobustMNINormalization, - ) - from templateflow.api import get as get_template - - # Get settings - testing = config.execution.debug - n_procs = config.nipype.nprocs - ants_nthreads = config.nipype.omp_nthreads - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["epi_mean", "epi_mask"]), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface(fields=["epi_mni", "epi_parc", "report"]), - name="outputnode", - ) - - n4itk = pe.Node(N4BiasFieldCorrection(dimension=3, copy_header=True), name="SharpenEPI") - - norm = pe.Node( - RobustMNINormalization( - explicit_masking=False, - flavor="testing" if testing else "precise", - float=config.execution.ants_float, - generate_report=True, - moving="boldref", - num_threads=ants_nthreads, - reference="boldref", - template=config.workflow.template_id, - ), - name="EPI2MNI", - num_threads=n_procs, - mem_gb=3, - ) - - if config.workflow.species.lower() == "human": - norm.inputs.reference_image = str( - get_template(config.workflow.template_id, resolution=2, suffix="boldref") - ) - norm.inputs.reference_mask = str( - get_template( - config.workflow.template_id, - resolution=2, - desc="brain", - suffix="mask", - ) - ) - # adapt some population-specific settings - else: - from nirodents.workflows.brainextraction import _bspline_grid - - n4itk.inputs.shrink_factor = 1 - n4itk.inputs.n_iterations = [50] * 4 - 
norm.inputs.reference_image = str(get_template(config.workflow.template_id, suffix="T2w")) - norm.inputs.reference_mask = str( - get_template( - config.workflow.template_id, - desc="brain", - suffix="mask", - )[0] - ) - - bspline_grid = pe.Node(niu.Function(function=_bspline_grid), name="bspline_grid") - - # fmt: off - workflow.connect([ - (inputnode, bspline_grid, [('epi_mean', 'in_file')]), - (bspline_grid, n4itk, [('out', 'args')]) - ]) - # fmt: on - - # Warp segmentation into EPI space - invt = pe.Node( - ApplyTransforms( - float=True, - dimension=3, - default_value=0, - interpolation="MultiLabel", - ), - name="ResampleSegmentation", - ) - - if config.workflow.species.lower() == "human": - invt.inputs.input_image = str( - get_template( - config.workflow.template_id, - resolution=1, - desc="carpet", - suffix="dseg", - ) - ) - else: - invt.inputs.input_image = str( - get_template( - config.workflow.template_id, - suffix="dseg", - )[-1] - ) - - # fmt: off - workflow.connect([ - (inputnode, invt, [("epi_mean", "reference_image")]), - (inputnode, n4itk, [("epi_mean", "input_image")]), - (n4itk, norm, [("output_image", "moving_image")]), - (norm, invt, [ - ("inverse_composite_transform", "transforms")]), - (invt, outputnode, [("output_image", "epi_parc")]), - (norm, outputnode, [("warped_image", "epi_mni"), - ("out_report", "report")]), - ]) - # fmt: on - - if config.workflow.species.lower() == "human": - workflow.connect([(inputnode, norm, [("epi_mask", "moving_mask")])]) - - return workflow - - -def _parse_tqual(in_file): - if isinstance(in_file, (list, tuple)): - return ( - [_parse_tqual(f) for f in in_file] if len(in_file) > 1 - else _parse_tqual(in_file[0]) - ) - - import numpy as np - - with open(in_file, "r") as fin: - lines = fin.readlines() - return np.mean([float(line.strip()) for line in lines if not line.startswith("++")]) - - -def _parse_tout(in_file): - if isinstance(in_file, (list, tuple)): - return ( - [_parse_tout(f) for f in in_file] if len(in_file) 
> 1 - else _parse_tout(in_file[0]) - ) - - import numpy as np - - data = np.loadtxt(in_file) # pylint: disable=no-member - return data.mean() - - -def _apply_transforms(in_file, in_xfm): - from pathlib import Path - from nitransforms.linear import load - from mriqc.utils.bids import derive_bids_fname - - realigned = load(in_xfm, fmt="afni", reference=in_file, moving=in_file).apply(in_file) - out_file = derive_bids_fname( - in_file, - entity="desc-realigned", - newpath=Path.cwd(), - absolute=True, - ) - - realigned.to_filename(out_file) - return str(out_file) diff --git a/pydra/tasks/mriqc/workflows/functional/output.py b/pydra/tasks/mriqc/workflows/functional/output.py deleted file mode 100644 index 72a1d23..0000000 --- a/pydra/tasks/mriqc/workflows/functional/output.py +++ /dev/null @@ -1,429 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -"""Writing out functional reportlets.""" -from mriqc import config -from mriqc.interfaces import DerivativesDataSink - -from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu - - -def init_func_report_wf(name="func_report_wf"): - """ - Write out individual reportlets. - - .. 
workflow:: - - from mriqc.workflows.functional.output import init_func_report_wf - from mriqc.testing import mock_config - with mock_config(): - wf = init_func_report_wf() - - """ - from nireports.interfaces import FMRISummary - from niworkflows.interfaces.morphology import BinaryDilation, BinarySubtraction - - from nireports.interfaces import PlotMosaic, PlotSpikes - from mriqc.interfaces.functional import Spikes - - # from mriqc.interfaces.reports import IndividualReport - - verbose = config.execution.verbose_reports - mem_gb = config.workflow.biggest_file_gb - reportlets_dir = config.execution.work_dir / "reportlets" - - workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "in_ras", - "hmc_epi", - "epi_mean", - "brainmask", - "hmc_fd", - "fd_thres", - "epi_parc", - "in_dvars", - "in_stddev", - "outliers", - "in_spikes", - "in_fft", - "in_iqms", - "mni_report", - "ica_report", - "meta_sidecar", - "name_source", - ] - ), - name="inputnode", - ) - - # Set FD threshold - inputnode.inputs.fd_thres = config.workflow.fd_thres - - spmask = pe.MapNode( - niu.Function( - input_names=["in_file", "in_mask"], - output_names=["out_file", "out_plot"], - function=spikes_mask, - ), - name="SpikesMask", - mem_gb=mem_gb * 3.5, - iterfield=["in_file"], - ) - - spikes_bg = pe.MapNode( - Spikes(no_zscore=True, detrend=False), - name="SpikesFinderBgMask", - mem_gb=mem_gb * 2.5, - iterfield=["in_file", "in_mask"], - ) - - # Generate crown mask - # Create the crown mask - dilated_mask = pe.Node(BinaryDilation(), name="dilated_mask") - subtract_mask = pe.Node(BinarySubtraction(), name="subtract_mask") - parcels = pe.Node(niu.Function(function=_carpet_parcellation), name="parcels") - - bigplot = pe.MapNode( - FMRISummary(), - name="BigPlot", - mem_gb=mem_gb * 3.5, - iterfield=["in_func", "dvars", "outliers", "in_spikes_bg"], - ) - - # fmt: off - workflow.connect([ - (inputnode, spikes_bg, [("in_ras", "in_file")]), - (inputnode, spmask, [("in_ras", 
"in_file")]), - (inputnode, bigplot, [("hmc_epi", "in_func"), - ("hmc_fd", "fd"), - ("fd_thres", "fd_thres"), - ("in_dvars", "dvars"), - ("outliers", "outliers"), - (("meta_sidecar", _get_tr), "tr")]), - (inputnode, parcels, [("epi_parc", "segmentation")]), - (inputnode, dilated_mask, [("brainmask", "in_mask")]), - (inputnode, subtract_mask, [("brainmask", "in_subtract")]), - (spmask, spikes_bg, [("out_file", "in_mask")]), - (dilated_mask, subtract_mask, [("out_mask", "in_base")]), - (subtract_mask, parcels, [("out_mask", "crown_mask")]), - (parcels, bigplot, [("out", "in_segm")]), - (spikes_bg, bigplot, [("out_tsz", "in_spikes_bg")]), - ]) - # fmt: on - - mosaic_mean = pe.MapNode( - PlotMosaic( - out_file="plot_func_mean_mosaic1.svg", - cmap="Greys_r", - ), - name="PlotMosaicMean", - iterfield=["in_file"], - ) - - mosaic_stddev = pe.MapNode( - PlotMosaic( - out_file="plot_func_stddev_mosaic2_stddev.svg", - cmap="viridis", - ), - name="PlotMosaicSD", - iterfield=["in_file"], - ) - - mosaic_zoom = pe.MapNode( - PlotMosaic( - cmap="Greys_r", - ), - name="PlotMosaicZoomed", - iterfield=["in_file"], - ) - - mosaic_noise = pe.MapNode( - PlotMosaic( - only_noise=True, - cmap="viridis_r", - ), - name="PlotMosaicNoise", - iterfield=["in_file"], - ) - - if config.workflow.species.lower() in ("rat", "mouse"): - mosaic_mean.inputs.view = ["coronal", "axial"] - mosaic_stddev.inputs.view = ["coronal", "axial"] - mosaic_zoom.inputs.view = ["coronal", "axial"] - mosaic_noise.inputs.view = ["coronal", "axial"] - - ds_report_mean = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="mean", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_mean", - run_without_submitting=True, - iterfield=["in_file", "source_file"], - ) - - ds_report_stdev = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="stdev", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_stdev", - 
run_without_submitting=True, - iterfield=["in_file", "source_file"], - ) - - ds_report_background = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="background", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_background", - run_without_submitting=True, - iterfield=["in_file", "source_file"], - ) - - ds_report_zoomed = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="zoomed", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_zoomed", - run_without_submitting=True, - iterfield=["in_file", "source_file"], - ) - - ds_report_carpet = pe.MapNode( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="carpet", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_carpet", - run_without_submitting=True, - iterfield=["in_file", "source_file"], - ) - - # fmt: off - workflow.connect([ - # (inputnode, rnode, [("in_iqms", "in_iqms")]), - (inputnode, mosaic_mean, [("epi_mean", "in_file")]), - (inputnode, mosaic_stddev, [("in_stddev", "in_file")]), - (inputnode, ds_report_mean, [("name_source", "source_file")]), - (inputnode, ds_report_stdev, [("name_source", "source_file")]), - (inputnode, ds_report_background, [("name_source", "source_file")]), - (inputnode, ds_report_zoomed, [("name_source", "source_file")]), - (inputnode, ds_report_carpet, [("name_source", "source_file")]), - (inputnode, mosaic_zoom, [("epi_mean", "in_file"), - ("brainmask", "bbox_mask_file")]), - (inputnode, mosaic_noise, [("epi_mean", "in_file")]), - (mosaic_mean, ds_report_mean, [("out_file", "in_file")]), - (mosaic_stddev, ds_report_stdev, [("out_file", "in_file")]), - (mosaic_noise, ds_report_background, [("out_file", "in_file")]), - (mosaic_zoom, ds_report_zoomed, [("out_file", "in_file")]), - (bigplot, ds_report_carpet, [("out_file", "in_file")]), - ]) - # fmt: on - - if config.workflow.fft_spikes_detector: - mosaic_spikes = pe.Node( - PlotSpikes( - 
out_file="plot_spikes.svg", - cmap="viridis", - title="High-Frequency spikes", - ), - name="PlotSpikes", - ) - - ds_report_spikes = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="spikes", - datatype="figures", - dismiss_entities=("part",), - ), - name="ds_report_spikes", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, ds_report_spikes, [("name_source", "source_file")]), - (inputnode, mosaic_spikes, [("in_ras", "in_file"), - ("in_spikes", "in_spikes"), - ("in_fft", "in_fft")]), - (mosaic_spikes, ds_report_spikes, [("out_file", "in_file")]), - ]) - # fmt: on - - if not verbose: - return workflow - - # Verbose-reporting goes here - from niworkflows.utils.connections import pop_file as _pop - from nireports.interfaces import PlotContours - - plot_bmask = pe.Node( - PlotContours( - display_mode="y" if config.workflow.species.lower() in ("rat", "mouse") else "z", - levels=[0.5], - colors=["r"], - cut_coords=10, - out_file="bmask", - ), - name="PlotBrainmask", - ) - - ds_report_bmask = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="brainmask", - datatype="figures", - dismiss_entities=("part", "echo"), - ), - name="ds_report_bmask", - run_without_submitting=True, - ) - - ds_report_norm = pe.Node( - DerivativesDataSink( - base_directory=reportlets_dir, - desc="norm", - datatype="figures", - dismiss_entities=("part", "echo"), - ), - name="ds_report_norm", - run_without_submitting=True, - ) - - # fmt: off - workflow.connect([ - (inputnode, ds_report_norm, [("mni_report", "in_file"), - ("name_source", "source_file")]), - (inputnode, plot_bmask, [(("epi_mean", _pop), "in_file"), - ("brainmask", "in_contours")]), - (inputnode, ds_report_bmask, [("name_source", "source_file")]), - (plot_bmask, ds_report_bmask, [(("out_file", _pop), "in_file")]), - ]) - # fmt: on - - return workflow - - -def spikes_mask(in_file, in_mask=None, out_file=None): - """Calculate a mask in which check for :abbr:`EM 
(electromagnetic)` spikes.""" - import os.path as op - - import nibabel as nb - import numpy as np - from nilearn.image import mean_img - from nilearn.plotting import plot_roi - from scipy import ndimage as nd - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath(f"{fname}_spmask{ext}") - out_plot = op.abspath(f"{fname}_spmask.pdf") - - in_4d_nii = nb.load(in_file) - orientation = nb.aff2axcodes(in_4d_nii.affine) - - if in_mask: - mask_data = np.asanyarray(nb.load(in_mask).dataobj) - a = np.where(mask_data != 0) - bbox = ( - np.max(a[0]) - np.min(a[0]), - np.max(a[1]) - np.min(a[1]), - np.max(a[2]) - np.min(a[2]), - ) - longest_axis = np.argmax(bbox) - - # Input here is a binarized and intersected mask data from previous section - dil_mask = nd.binary_dilation(mask_data, iterations=int(mask_data.shape[longest_axis] / 9)) - - rep = list(mask_data.shape) - rep[longest_axis] = -1 - new_mask_2d = dil_mask.max(axis=longest_axis).reshape(rep) - - rep = [1, 1, 1] - rep[longest_axis] = mask_data.shape[longest_axis] - new_mask_3d = np.logical_not(np.tile(new_mask_2d, rep)) - else: - new_mask_3d = np.zeros(in_4d_nii.shape[:3]) == 1 - - if orientation[0] in ("L", "R"): - new_mask_3d[0:2, :, :] = True - new_mask_3d[-3:-1, :, :] = True - else: - new_mask_3d[:, 0:2, :] = True - new_mask_3d[:, -3:-1, :] = True - - mask_nii = nb.Nifti1Image(new_mask_3d.astype(np.uint8), in_4d_nii.affine, in_4d_nii.header) - mask_nii.to_filename(out_file) - - plot_roi(mask_nii, mean_img(in_4d_nii), output_file=out_plot) - return out_file, out_plot - - -def _carpet_parcellation(segmentation, crown_mask): - """Generate the union of two masks.""" - from pathlib import Path - import numpy as np - import nibabel as nb - - img = nb.load(segmentation) - - lut = np.zeros((256,), dtype="uint8") - lut[100:201] = 1 # Ctx GM - lut[30:99] = 2 # dGM - lut[1:11] = 3 # WM+CSF - lut[255] = 4 # 
Cerebellum - # Apply lookup table - seg = lut[np.asanyarray(img.dataobj, dtype="uint16")] - seg[np.asanyarray(nb.load(crown_mask).dataobj, dtype=int) > 0] = 5 - - outimg = img.__class__(seg.astype("uint8"), img.affine, img.header) - outimg.set_data_dtype("uint8") - out_file = Path("segments.nii.gz").absolute() - outimg.to_filename(out_file) - return str(out_file) - - -def _get_tr(meta_dict): - if isinstance(meta_dict, (list, tuple)): - meta_dict = meta_dict[0] - - return meta_dict.get("RepetitionTime", None) diff --git a/pydra/tasks/mriqc/workflows/shared.py b/pydra/tasks/mriqc/workflows/shared.py deleted file mode 100644 index 1144569..0000000 --- a/pydra/tasks/mriqc/workflows/shared.py +++ /dev/null @@ -1,94 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2023 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -"""Shared workflows.""" -# from nipype.interfaces import utility as niu -# from nipype.pipeline import engine as pe - -import pydra - -def synthstrip_wf(name="synthstrip_wf", omp_nthreads=None): - """Create a brain-extraction workflow using SynthStrip.""" - from pydra.tasks.ants import N4BiasFieldCorrection - from pydra.tasks.nibabel import IntensityClip, ApplyMask - from pydra.tasks.synthstrip import SynthStrip - - wf = pydra.Workflow(name=name, input_spec=["in_files"]) - - wf.add( - IntensityClip(p_min=10, p_max=99.9, in_file=wf.lzin.in_files), name="pre_clip" - ) - - wf.add( - N4BiasFieldCorrection( - dimension=3, - num_threads=omp_nthreads, - rescale_intensities=True, - copy_header=True, - input_image=wf.pre_clip.lzout.out_file, - ), - name="pre_n4", - ) - - post_n4 = pe.Node( - N4BiasFieldCorrection( - dimension=3, - save_bias=True, - num_threads=omp_nthreads, - n_iterations=[50] * 4, - copy_header=True, - ), - name="post_n4", - ) - - synthstrip = pe.Node( - SynthStrip(num_threads=omp_nthreads), - name="synthstrip", - num_threads=omp_nthreads, - ) - - final_masked = pe.Node(ApplyMask(), name="final_masked") - - workflow = pe.Workflow(name=name) - # fmt: off - workflow.connect([ - (inputnode, pre_clip, [("in_files", "in_file")]), - (pre_clip, pre_n4, [("out_file", "input_image")]), - (pre_n4, synthstrip, [("output_image", "in_file")]), - (synthstrip, post_n4, [("out_mask", "weight_image")]), - (synthstrip, final_masked, [("out_mask", "in_mask")]), - (pre_clip, post_n4, [("out_file", "input_image")]), - (post_n4, final_masked, [("output_image", "in_file")]), - (final_masked, outputnode, [("out_file", "out_brain")]), - (post_n4, outputnode, [("bias_image", "bias_image")]), - (synthstrip, outputnode, [("out_mask", "out_mask")]), - (post_n4, outputnode, [("output_image", "out_corrected")]), - ]) - - 
wf.set_output( - ("out_corrected",), - ("out_brain", ), - ("bias_image", ), - ("out_mask"), - ) - # fmt: on - return wf diff --git a/pydra/tasks/mriqc/workflows/utils.py b/pydra/tasks/mriqc/workflows/utils.py deleted file mode 100644 index 10c30f1..0000000 --- a/pydra/tasks/mriqc/workflows/utils.py +++ /dev/null @@ -1,247 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright 2021 The NiPreps Developers -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# -"""Helper functions for the workflows.""" - - -def _tofloat(inlist): - if isinstance(inlist, (list, tuple)): - return ( - [_tofloat(el) for el in inlist] if len(inlist) > 1 - else _tofloat(inlist[0]) - ) - return float(inlist) - - -def fwhm_dict(fwhm): - """Convert a list of FWHM into a dictionary""" - fwhm = [float(f) for f in fwhm] - return { - "fwhm_x": fwhm[0], - "fwhm_y": fwhm[1], - "fwhm_z": fwhm[2], - "fwhm_avg": fwhm[3], - } - - -def thresh_image(in_file, thres=0.5, out_file=None): - """Thresholds an image""" - import os.path as op - import numpy as np - import nibabel as nb - - if out_file is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, ext2 = op.splitext(fname) - ext = ext2 + ext - out_file = op.abspath(f"{fname}_thresh{ext}") - - im = nb.load(in_file) - data = np.asanyarray(im.dataobj) - data[data < thres] = 0 - data[data > 0] = 1 - nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file) - return out_file - - -def spectrum_mask(size): - """Creates a mask to filter the image of size size""" - import numpy as np - from scipy.ndimage.morphology import distance_transform_edt as distance - - ftmask = np.ones(size) - - # Set zeros on corners - # ftmask[0, 0] = 0 - # ftmask[size[0] - 1, size[1] - 1] = 0 - # ftmask[0, size[1] - 1] = 0 - # ftmask[size[0] - 1, 0] = 0 - ftmask[size[0] // 2, size[1] // 2] = 0 - - # Distance transform - ftmask = distance(ftmask) - ftmask /= ftmask.max() - - # Keep this just in case we want to switch to the opposite filter - ftmask *= -1.0 - ftmask += 1.0 - - ftmask[ftmask >= 0.4] = 1 - ftmask[ftmask < 1] = 0 - return ftmask - - -def slice_wise_fft(in_file, ftmask=None, spike_thres=3.0, out_prefix=None): - """Search for spikes in slices using the 2D FFT""" - import os.path as op - - import nibabel as nb - import numpy as np - from 
mriqc.workflows.utils import spectrum_mask - from scipy.ndimage import binary_erosion, generate_binary_structure - from scipy.ndimage.filters import median_filter - from statsmodels.robust.scale import mad - - if out_prefix is None: - fname, ext = op.splitext(op.basename(in_file)) - if ext == ".gz": - fname, _ = op.splitext(fname) - out_prefix = op.abspath(fname) - - func_data = nb.load(in_file).get_fdata() - - if ftmask is None: - ftmask = spectrum_mask(tuple(func_data.shape[:2])) - - fft_data = [] - for t in range(func_data.shape[-1]): - func_frame = func_data[..., t] - fft_slices = [] - for z in range(func_frame.shape[2]): - sl = func_frame[..., z] - fftsl = ( - median_filter( - np.real(np.fft.fft2(sl)).astype(np.float32), - size=(5, 5), - mode="constant", - ) - * ftmask - ) - fft_slices.append(fftsl) - fft_data.append(np.stack(fft_slices, axis=-1)) - - # Recompose the 4D FFT timeseries - fft_data = np.stack(fft_data, -1) - - # Z-score across t, using robust statistics - mu = np.median(fft_data, axis=3) - sigma = np.stack([mad(fft_data, axis=3)] * fft_data.shape[-1], -1) - idxs = np.where(np.abs(sigma) > 1e-4) - fft_zscored = fft_data - mu[..., np.newaxis] - fft_zscored[idxs] /= sigma[idxs] - - # save fft z-scored - out_fft = op.abspath(out_prefix + "_zsfft.nii.gz") - nii = nb.Nifti1Image(fft_zscored.astype(np.float32), np.eye(4), None) - nii.to_filename(out_fft) - - # Find peaks - spikes_list = [] - for t in range(fft_zscored.shape[-1]): - fft_frame = fft_zscored[..., t] - - for z in range(fft_frame.shape[-1]): - sl = fft_frame[..., z] - if np.all(sl < spike_thres): - continue - - # Any zscore over spike_thres will be called a spike - sl[sl <= spike_thres] = 0 - sl[sl > 0] = 1 - - # Erode peaks and see how many survive - struct = generate_binary_structure(2, 2) - sl = binary_erosion(sl.astype(np.uint8), structure=struct).astype(np.uint8) - - if sl.sum() > 10: - spikes_list.append((t, z)) - - out_spikes = op.abspath(out_prefix + "_spikes.tsv") - 
np.savetxt(out_spikes, spikes_list, fmt=b"%d", delimiter=b"\t", header="TR\tZ") - - return len(spikes_list), out_spikes, out_fft - - -def get_fwhmx(): - from nipype.interfaces.afni import FWHMx, Info - - fwhm_args = {"combine": True, "detrend": True} - afni_version = Info.version() - - if afni_version and afni_version >= (2017, 2, 3): - fwhm_args["args"] = "-ShowMeClassicFWHM" - - fwhm_interface = FWHMx(**fwhm_args) - return fwhm_interface - - -def generate_filename(in_file, dirname=None, suffix="", extension=None): - """ - Generate a nipype-like filename. - - >>> str(generate_filename("/path/to/input.nii.gz").relative_to(Path.cwd())) - 'input.nii.gz' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", - ... )) - '/other/path/input.nii.gz' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", extension="tsv", - ... )) - '/other/path/input.tsv' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", extension=".tsv", - ... )) - '/other/path/input.tsv' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", extension="", - ... )) - '/other/path/input' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", extension="", suffix="_mod", - ... )) - '/other/path/input_mod' - - >>> str(generate_filename( - ... "/path/to/input.nii.gz", dirname="/other/path", extension="", suffix="mod", - ... )) - '/other/path/input_mod' - - >>> str(generate_filename( - ... "/path/to/input", dirname="/other/path", extension="tsv", suffix="mod", - ... 
)) - '/other/path/input_mod.tsv' - - """ - from pathlib import Path - in_file = Path(in_file) - in_ext = "".join(in_file.suffixes) - - dirname = Path.cwd() if dirname is None else Path(dirname) - - if extension is not None: - extension = extension if not extension or extension.startswith(".") else f".{extension}" - else: - extension = in_ext - - stem = in_file.name[:-len(in_ext)] if in_ext else in_file.name - - if suffix and not suffix.startswith("_"): - suffix = f"_{suffix}" - - return dirname / f"{stem}{suffix}{extension}" diff --git a/pyproject.toml b/pyproject.toml index 3a42342..df379ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,16 +6,41 @@ build-backend = "hatchling.build" name = "pydra-mriqc" description = "Pydra tasks package for mriqc" readme = "README.rst" -requires-python = ">=3.8" +requires-python = ">=3.10" dependencies = [ - "pydra >=0.22", + "dipy", "fileformats >=0.8.3", "fileformats-datascience >=0.1", "fileformats-medimage >=0.4.1", + "markupsafe ~= 2.0.1", + "matplotlib", + "nibabel", + "nilearn", + "migas >= 0.4.0", + "pandas ~=1.0", + "pydra >=0.22", + "pydra-ants", + "pydra-afni", + "pydra-fsl", + "pydra-mrtrix3 >=3.0.3a0", + "pydra-niworkflows", + "pydra-nireports", + "PyYAML", + "seaborn", + "scikit-learn", + "scipy", + "statsmodels", + "templateflow", + "nilearn", + "torch", + "toml", + "tomli >= 1.1.0; python_version < '3.11'", +] +license = { file = "LICENSE" } +authors = [{ name = "Nipype developers", email = "neuroimaging@python.org" }] +maintainers = [ + { name = "Nipype developers", email = "neuroimaging@python.org" }, ] -license = {file = "LICENSE"} -authors = [{name = "Nipype developers", email = "neuroimaging@python.org"}] -maintainers = [{name = "Nipype developers", email = "neuroimaging@python.org"}] keywords = ["pydra"] classifiers = [ "Development Status :: 2 - Pre-Alpha", @@ -30,10 +55,7 @@ classifiers = [ dynamic = ["version"] [project.optional-dependencies] -dev = [ - "black", - "pre-commit", -] +dev = ["black", 
"pre-commit"] doc = [ "packaging", "sphinx >=2.1.2", @@ -43,6 +65,7 @@ doc = [ "sphinxcontrib-versioning", ] test = [ + "nipype2pydra", "pytest >= 4.4.0", "pytest-cov", "pytest-env", @@ -52,6 +75,9 @@ test = [ "fileformats-extras", "fileformats-datascience-extras", "fileformats-medimage-extras", + "fileformats-medimage-afni-extras", + "fileformats-medimage-mrtrix3-extras", + "fileformats-medimage-fsl-extras", ] [tool.hatch.version] @@ -73,9 +99,7 @@ ignore-words = ".codespell-ignorewords" [tool.flake8] doctests = true -per-file-ignores = [ - "__init__.py:F401,F403" -] +per-file-ignores = ["__init__.py:F401,F403"] max-line-length = 88 select = "C,E,F,W,B,B950" extend-ignore = ['E203', 'E501', 'E129', 'W503'] diff --git a/report_progress.py b/report_progress.py new file mode 100644 index 0000000..1fc0767 --- /dev/null +++ b/report_progress.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +from pathlib import Path +import json +import yaml +import click + + +@click.command +@click.argument( + "out_json_path", + type=click.Path(path_type=Path), + help="The output path to save the report", +) +def report_progress(out_json_path: Path): + + out_json_path.parent.mkdir(exist_ok=True, parents=True) + + SPECS_DIR = Path(__file__).parent / "nipype-auto-conv" / "specs" + + report = {} + + for spec_path in SPECS_DIR.glob("*.yaml"): + with open(spec_path) as f: + spec = yaml.load(f, Loader=yaml.SafeLoader) + + report[spec["task_name"]] = { + n: not s["xfail"] for n, s in spec["tests"].items() + } + + with open(out_json_path, "w") as f: + json.dump(report, f) diff --git a/tools/increment_tool_version.py b/tools/increment_tool_version.py new file mode 100755 index 0000000..e6d56ed --- /dev/null +++ b/tools/increment_tool_version.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +from pathlib import Path +import inspect +from importlib import import_module +import click +from looseversion import LooseVersion +from pydra.engine.core import TaskBase + + +PKG_DIR = Path(__file__).parent.parent 
+TASKS_DIR = PKG_DIR / "pydra" / "tasks" / "ants" +VERSION_GRANULARITY = ( + 2 # Number of version parts to include: 1 - major, 2 - minor, 3 - micro +) + + +@click.command( + help="""Increment the latest version or create a new sub-package for interfaces for +a new release of AFNI depending on whether one already exists or not. + +NEW_VERSION the version of AFNI to create a new sub-package for +""" +) +@click.argument("new_version", type=LooseVersion) +def increment_tool_version(new_version: LooseVersion): + + # Get the name of the sub-package, e.g. "v2_5" + new_subpkg_name = "_".join(str(p) for p in new_version.version[:VERSION_GRANULARITY]) # type: ignore + if not new_subpkg_name.startswith("v"): + new_subpkg_name = "v" + new_subpkg_name + sub_pkg_dir = TASKS_DIR / new_subpkg_name + if not sub_pkg_dir.exists(): + + prev_version = sorted( + ( + p.name + for p in TASKS_DIR.iterdir() + if p.is_dir() and p.name.startswith("v") + ), + key=lambda x: LooseVersion(".".join(x.split("_"))).version, + )[-1] + prev_ver_mod = import_module(f"pydra.tasks.ants.{prev_version}") + + mod_attrs = [getattr(prev_ver_mod, a) for a in dir(prev_ver_mod)] + task_classes = [ + a for a in mod_attrs if inspect.isclass(a) and issubclass(a, TaskBase) + ] + + code_str = ( + f"from pydra.tasks.ants import {prev_version}\n" + "from . 
import _tool_version\n" + ) + + for task_cls in task_classes: + code_str += ( + f"\n\nclass {task_cls.__name__}({prev_version}.{task_cls.__name__}):\n" + " TOOL_VERSION = _tool_version.TOOL_VERSION\n" + ) + + sub_pkg_dir.mkdir(exist_ok=True) + with open(sub_pkg_dir / "__init__.py", "w") as f: + f.write(code_str) + + with open(sub_pkg_dir / "_tool_version.py", "w") as f: + f.write(f'TOOL_VERSION = "{new_version}"\n') + + +if __name__ == "__main__": + increment_tool_version() diff --git a/tools/rename_template.py b/tools/rename_template.py new file mode 100755 index 0000000..a682d24 --- /dev/null +++ b/tools/rename_template.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +import sys +import os +import re +import fnmatch +import functools +from pathlib import Path + +PACKAGE_ROOT = Path(__file__).absolute().parent.parent + + +@functools.lru_cache() +def load_gitignore(repo): + gitignore = repo / ".gitignore" + ignore = [fnmatch.translate(".git/"), fnmatch.translate(Path(__file__).name)] + if gitignore.exists(): + ignore.extend( + fnmatch.translate(line.strip()) + for line in gitignore.read_text().splitlines() + if line.strip() and not line[0] == "#" + ) + return re.compile("|".join(ignore)) + + +cmd, new_name, *_ = sys.argv + +for root_, dirs, files in os.walk(PACKAGE_ROOT): + ignore = load_gitignore(PACKAGE_ROOT).search + for d in [d for d in dirs if ignore(f"{d}/")]: + dirs.remove(d) + for f in [f for f in files if ignore(f)]: + files.remove(f) + + root = Path(root_) + for src in list(dirs): + if "mriqc" in src: + dst = src.replace("mriqc", new_name) + print(f"Renaming: {root / src} -> {root / dst}") + os.rename(root / src, root / dst) + dirs.remove(src) + dirs.append(dst) + for fname in files: + text = Path.read_text(root / fname) + if "mriqc" in text: + print(f"Rewriting: {root / fname}") + Path.write_text(root / fname, text.replace("mriqc", new_name)) diff --git a/tools/requirements.txt b/tools/requirements.txt new file mode 100644 index 0000000..3b7ccec --- 
/dev/null +++ b/tools/requirements.txt @@ -0,0 +1,3 @@ +click >= 8.1.3 +looseversion >= 1.1 +pydra >= 0.23 \ No newline at end of file